1 First, a description of my deployment environment:
Two OSD nodes, one monitor, one admin server, and one client. Each machine has 24 cores, 64 GB of RAM, a 1.6 TB SSD flash card, and a gigabit NIC. The installed Ceph version is 0.94.7.
2 Current situation
Writing 5 GB of data with dd, iostat shows %util hitting 100% immediately and await climbing above 4000, while the network is only carrying about 10M of traffic at that point.
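The exact commands were not included above; a minimal reproduction of this test might look like the following (the /mnt/test path, block size, and count are assumptions):
dd if=/dev/zero of=/mnt/test bs=4M count=1280 oflag=direct   # 5 GB direct sequential write, bypassing the page cache
iostat -x 1                                                  # watch %util and await on the backing device while dd runs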
A sequential write of 1 GB with fio reports a bandwidth of only about 7 MB/s and only 800-odd IOPS:
fio --name=seqWrite --filename=/mnt/test --numjobs=2 --bs=4k --size=1G --ioengine=libaio --iodepth=32 --direct=1 --rw=write --group_reporting --randrepeat=0
Current configuration file:
[global]
fsid = 87c84fff-c652-4873-953b-85549fa400b9
mon_initial_members = cmon
mon_host = 172.16.1.144
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
filestore_xattr_use_omap = true
osd_pool_default_size = 2
osd_pool_default_min_size = 1
osd_pool_default_pg_num = 128
osd_pool_default_pgp_num = 128
filestore_fd_cache_size = 204800
filestore_omap_header_cache_size = 204800
filestore_fiemap = true
filestore_wbthrottle_xfs_bytes_start_flusher = 500000000
filestore_wbthrottle_xfs_inodes_start_flusher = 500
filestore_wbthrottle_xfs_inodes_hard_limit = 500000
filestore_wbthrottle_xfs_ios_start_flusher = 50000
filestore_wbthrottle_xfs_bytes_hard_limit = 500000000
filestore_wbthrottle_xfs_ios_hard_limit = 500000
filestore_queue_max_ops = 5000
filestore_queue_max_bytes = 1024000000
filestore_queue_committing_max_ops = 50000
journal_queue_max_ops = 500000
journal_queue_max_bytes = 10240000000
osd_journal_size = 10000
ms_dispatch_throttle_bytes = 104857600000
osd_op_threads = 4
osd_disk_threads = 2
filestore_op_threads = 6
osd_mount_options_xfs = "rw,noexec,nodev,noatime,nodiratime,nobarrier"
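Changes to the filestore/journal options above only take effect after the OSDs are restarted (or the values are injected at runtime). One way to verify what an OSD is actually running with is to query its admin socket on the OSD host; osd.0 below is just an example ID:
ceph daemon osd.0 config show | grep wbthrottle            # dump the live wbthrottle settings
ceph daemon osd.0 config get filestore_queue_max_ops       # check a single option
ceph tell osd.* injectargs '--filestore_queue_max_ops 5000'   # apply a value at runtime without a restart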