[Ceph-community] Ceph performance IOPS

Davis Mendoza Paco davis.men.pa at gmail.com
Thu Apr 18 08:16:10 PDT 2019


Hello,
I am new to Ceph and would like some help improving IOPS performance.
I run around 200 virtual machines and their I/O is very slow; maybe I
need to move from SATA to SSD. What would you recommend?
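
As a baseline, I was thinking of first measuring raw 4K IOPS from one
of the nodes with rados bench, something like the following (the pool
name "bench" is only a placeholder for a dedicated test pool):

rados bench -p bench 60 write -b 4096 -t 16 --no-cleanup
rados bench -p bench 60 rand -t 16
rados -p bench cleanup

Would those numbers be useful to share here?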

Thanks for your time. I'm sorry for my English :)

I currently run Ceph with OpenStack on 9 servers, all on Debian Stretch:
* 3 controllers
* 3 compute nodes
* 3 ceph-osd nodes, each with:
  network: 10 GbE LACP bond
  RAM: 96 GB
  disks: 11 x 3 TB SATA (BlueStore)
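
If SSD is the way to go, I understand a cheaper first step might be to
put only the BlueStore DB/WAL on SSD and keep the data on the SATA
disks. A rough sketch of how I would recreate one OSD that way (the
device names /dev/sdc and /dev/nvme0n1p1 are only examples for my
hardware):

ceph-volume lvm create --bluestore --data /dev/sdc --block.db /dev/nvme0n1p1

Does that make sense, or is all-flash the only real fix here?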

ceph -s
 cluster:
    id:     414507dd-8a16-4548-86b7-906b0c9905e1
    health: HEALTH_OK
 services:
    mon: 3 daemons, quorum controller01,controller02,controller03
    mgr: controller01(active), standbys: controller02, controller03
    osd: 33 osds: 33 up, 33 in
 data:
    pools:   8 pools, 496 pgs
    objects: 3.65M objects, 13.9TiB
    usage:   41.7TiB used, 51.1TiB / 92.8TiB avail
    pgs:     494 active+clean
             2   active+clean+scrubbing+deep
 io:
    client:   460KiB/s rd, 3.41MiB/s wr, 122op/s rd, 197op/s wr
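
In case the PG count matters: with 496 PGs across 8 pools and (I
assume) size=3 replication, that works out to roughly 496 * 3 / 33 ≈ 45
PGs per OSD, which seems below the ~100 per OSD that is often
recommended. I can also send the per-OSD distribution from:

ceph osd df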

The ceph.conf configuration:

[global]
fsid = 414507dd-8a16-4548-86b7-906b0c9905e1
mon_initial_members = controller01,controller02,controller03
mon_host = 192.168.31.11,192.168.31.12,192.168.31.13
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx

public network = 192.168.31.0/24
cluster network = 192.168.33.0/24

[osd]
osd_scrub_begin_hour = 22
osd_scrub_end_hour = 6
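
In case scrubbing overlaps with client I/O despite the 22:00-06:00
window (ceph -s above shows two PGs still deep scrubbing), I was also
considering throttling it at runtime with something like this, where
the osd_scrub_sleep value is only a guess on my part:

ceph tell osd.* injectargs '--osd_scrub_sleep 0.1'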


The Ceph version is Luminous:
ceph tell osd.* version
osd.0: {
    "version": "ceph version 12.2.12 (1436006594665279fe734b4c15d7e08c13ebd777) luminous (stable)"
}


The OSD configuration (the diff from defaults):

ceph daemon osd.1 config diff

{
    "diff": {
        "current": {
            "admin_socket": "/var/run/ceph/ceph-osd.1.asok",
            "auth_client_required": "cephx",
            "cluster_addr": "192.168.33.31:0/0",
            "cluster_network": "192.168.33.0/24",
            "err_to_stderr": "true",
            "fsid": "414507dd-8a16-4548-86b7-906b0c9905e1",
            "internal_safe_to_start_threads": "true",
            "keyring": "/var/lib/ceph/osd/ceph-1/keyring",
            "leveldb_log": "",
            "log_file": "/var/log/ceph/ceph-osd.1.log",
            "log_max_recent": "10000",
            "log_to_stderr": "false",
            "mds_data": "/var/lib/ceph/mds/ceph-1",
            "mgr_data": "/var/lib/ceph/mgr/ceph-1",
            "mon_cluster_log_file":
"default=/var/log/ceph/ceph.$channel.log cluster=/var/log/ceph/ceph.log",
            "mon_data": "/var/lib/ceph/mon/ceph-1",
            "mon_debug_dump_location": "/var/log/ceph/ceph-osd.1.tdump",
            "mon_host": "192.168.31.11,192.168.31.12,192.168.31.13",
            "mon_initial_members": "controller01,controller02,controller03",
            "osd_data": "/var/lib/ceph/osd/ceph-1",
            "osd_journal": "/var/lib/ceph/osd/ceph-1/journal",
            "osd_objectstore": "bluestore",
            "osd_scrub_begin_hour": "22",
            "osd_scrub_end_hour": "6",
            "public_addr": "192.168.31.31:0/0",
            "public_network": "192.168.31.0/24",
            "rgw_data": "/var/lib/ceph/radosgw/ceph-1",
            "setgroup": "ceph",
            "setuser": "ceph"
        },
        "defaults": {
            "admin_socket": "",
            "auth_client_required": "cephx, none",
            "cluster_addr": "-",
            "cluster_network": "",
            "err_to_stderr": "false",
            "fsid": "00000000-0000-0000-0000-000000000000",
            "internal_safe_to_start_threads": "false",
            "keyring":
"/etc/ceph/$cluster.$name.keyring,/etc/ceph/$cluster.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,",
            "leveldb_log": "/dev/null",
            "log_file": "",
            "log_max_recent": "500",
            "log_to_stderr": "true",
            "mds_data": "/var/lib/ceph/mds/$cluster-$id",
            "mgr_data": "/var/lib/ceph/mgr/$cluster-$id",
            "mon_cluster_log_file":
"default=/var/log/ceph/$cluster.$channel.log
cluster=/var/log/ceph/$cluster.log",
            "mon_data": "/var/lib/ceph/mon/$cluster-$id",
            "mon_debug_dump_location": "/var/log/ceph/$cluster-$name.tdump",
            "mon_host": "",
            "mon_initial_members": "",
            "osd_data": "/var/lib/ceph/osd/$cluster-$id",
            "osd_journal": "/var/lib/ceph/osd/$cluster-$id/journal",
            "osd_objectstore": "filestore",
            "osd_scrub_begin_hour": "0",
            "osd_scrub_end_hour": "24",
            "public_addr": "-",
            "public_network": "",
            "rgw_data": "/var/lib/ceph/radosgw/$cluster-$id",
            "setgroup": "",
            "setuser": ""
        }
    },
    "unknown": []
}

-- 
Davis Mendoza P.