Ceph: Commands and Cheatsheet
Working with Ceph pools
Check the pools
[root@deploy-ext kolla]# ceph osd pool ls
device_health_metrics
images
volumes
backups
manila_data
manila_metadata
.rgw.root
default.rgw.log
default.rgw.control
default.rgw.meta
default.rgw.buckets.index
default.rgw.buckets.data
default.rgw.buckets.non-ec
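For more context on each pool (replica size, pg_num, application tags) and per-pool usage, the following commands are also useful; exact output columns vary by Ceph release:

ceph osd pool ls detail
ceph df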
Create a pool
ceph osd pool create dptest 128 128
[root@deploy-ext kolla]# rbd create --size 20480 --pool dptest vol01
[root@deploy-ext kolla]# rbd info dptest/vol01
rbd image 'vol01':
        size 20 GiB in 5120 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 180b9ee11c2183
        block_name_prefix: rbd_data.180b9ee11c2183
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Thu Sep 2 07:28:16 2021
        access_timestamp: Thu Sep 2 07:28:16 2021
        modify_timestamp: Thu Sep 2 07:28:16 2021
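On recent Ceph releases a newly created pool is expected to be tagged with the application that will use it before I/O; otherwise the cluster raises a POOL_APP_NOT_ENABLED health warning. For an RBD pool like dptest either of the following works (rbd pool init also pre-initializes the pool for RBD):

ceph osd pool application enable dptest rbd
# or, for RBD pools specifically
rbd pool init dptest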
Resize a volume (RBD image)
[root@deploy-ext kolla]# rbd resize dptest/vol01 --size 51200
Resizing image: 100% complete...done.
[root@deploy-ext kolla]# rbd info dptest/vol01
rbd image 'vol01':
        size 50 GiB in 12800 objects
        order 22 (4 MiB objects)
        snapshot_count: 0
        id: 180b9ee11c2183
        block_name_prefix: rbd_data.180b9ee11c2183
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Thu Sep 2 07:28:16 2021
        access_timestamp: Thu Sep 2 07:28:16 2021
        modify_timestamp: Thu Sep 2 07:28:16 2021
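Resizing the image does not resize any filesystem already on it. In this example the filesystem is only created in the next step, so nothing more is needed here; but if the image already carries a mounted filesystem, grow it after the resize (commands below assume the mount point and device used later on this page):

xfs_growfs /root/kolla/ceph-vol    # XFS grows online via the mount point
# or, for ext4 on the mapped device:
resize2fs /dev/rbd0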
Map a device / mount a volume
[root@deploy-ext kolla]# rbd map dptest/vol01
/dev/rbd0
[root@deploy-ext kolla]# mkfs.xfs /dev/rbd/dptest/vol01
[root@deploy-ext kolla]# mount /dev/rbd/dptest/vol01 ./ceph-vol/
[root@deploy-ext kolla]# df -h | grep ceph-vol
/dev/rbd0        50G  390M   50G   1% /root/kolla/ceph-vol
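To reverse this when finished (paths match the example above), unmount the filesystem and unmap the device:

rbd showmapped                  # list current kernel mappings
umount /root/kolla/ceph-vol
rbd unmap /dev/rbd0             # or: rbd unmap dptest/vol01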
Ceph maintenance
Stop rebalancing (useful when rebooting systems, etc.)
ceph osd set noout; ceph osd set norebalance
# perform reboot / maintenance
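Once the maintenance window is over, clear the flags again and confirm the cluster no longer reports them; while set, they typically show up as an OSDMAP_FLAGS health warning:

ceph osd unset norebalance; ceph osd unset noout
ceph -s               # flags appear in the health section while they are set
ceph health detail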
Ceph locks
# here we had an instance lose connectivity to ceph
# in openstack, reset its state to active (nova reset-state --active UUID)
# openstack server stop UUID
# then remove the lock as below
# then we could snapshot
ceph osd unset norebalance; ceph osd unset noout

# checking volume info (openstack env timed out, volume went into error)
root@str-237:~# rbd info volumes/volume-417feeef-d79d-4a31-af13-f1bee971284b
rbd image 'volume-417feeef-d79d-4a31-af13-f1bee971284b':
        size 3.9 TiB in 512000 objects
        order 23 (8 MiB objects)
        snapshot_count: 0
        id: 5ba8f5a87a6a8
        block_name_prefix: rbd_data.5ba8f5a87a6a8
        format: 2
        features: layering, exclusive-lock, object-map, fast-diff, deep-flatten
        op_features:
        flags:
        create_timestamp: Tue Feb 14 19:37:14 2023
        access_timestamp: Thu Feb 23 19:18:17 2023
        modify_timestamp: Thu Feb 23 19:18:21 2023
        parent: volumes/volume-7aedaaa5-f547-401a-b4cb-1d2271eb1c7d@snapshot-509cbdc3-4eeb-4acf-ae0e-0cf64aa9f824
        overlap: 3.9 TiB

root@str-237:~# rbd lock ls volumes/volume-417feeef-d79d-4a31-af13-f1bee971284b
There is 1 exclusive lock on this image.
Locker          ID                     Address
client.6007059  auto 139760250032464   10.16.31.12:0/3416256037

root@str-237:~# rbd lock rm volumes/volume-417feeef-d79d-4a31-af13-f1bee971284b "auto 139760250032464" client.6007059

root@str-237:~# rbd lock ls volumes/volume-417feeef-d79d-4a31-af13-f1bee971284b
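Before removing an exclusive lock it is worth confirming nothing is still actively using the image. A quick check, using the same volume as above; an image with no watchers and a stale lock is usually safe to unlock:

rbd status volumes/volume-417feeef-d79d-4a31-af13-f1bee971284b   # lists current watchers
ceph osd blocklist ls    # clients blocklisted after broken locks (older releases: "blacklist")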