Commands used in this video, run on the admin node and the client:
# Install Ceph packages on all nodes
ceph-deploy install admin mon1 mon2 mon3 osd1 osd2 osd3 osd4 osd5 osd6 osd7 client
# Initialize the cluster with 3 monitors.
ceph-deploy new mon1 mon2 mon3
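# Optional (not shown in the video): ceph-deploy new writes ceph.conf and a monitor
# keyring into the current working directory. Cluster-wide settings can be appended
# to ceph.conf before deploying; the value below is illustrative only.
# echo "public_network = 192.168.1.0/24" >> ceph.conf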
# Add the initial monitor(s) and gather the keys.
ceph-deploy mon create-initial
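# Optional check (not shown in the video): verify monitor quorum from the admin
# node, using the conf and admin keyring gathered into the working directory.
ceph -c ceph.conf -k ceph.client.admin.keyring -s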
# Add OSDs: zap (erase), prepare, and activate the disks
ceph-deploy disk zap osd1:sdb osd2:sdb osd3:sdb
ceph-deploy osd prepare osd1:sdb osd2:sdb osd3:sdb
ceph-deploy osd activate osd1:sdb1:sdb2 osd2:sdb1:sdb2 osd3:sdb1:sdb2
# copy the configuration file and admin keyring to your admin node and your Ceph nodes
ceph-deploy admin admin mon1 mon2 mon3 osd1 osd2 osd3 client
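# Optional checks (not shown in the video): with the admin keyring distributed,
# confirm cluster health and that the first three OSDs are up and in. On some
# setups the keyring needs read permission first:
# sudo chmod +r /etc/ceph/ceph.client.admin.keyring
ceph -s
ceph osd tree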
# Add more OSDs
ceph-deploy disk zap osd4:sdb osd5:sdb osd6:sdb
ceph-deploy disk zap osd7:sdb osd7:sdc
ceph-deploy osd prepare osd4:sdb osd5:sdb osd6:sdb
ceph-deploy osd prepare osd7:sdb osd7:sdc
ceph-deploy osd activate osd4:sdb1:sdb2 osd5:sdb1:sdb2 osd6:sdb1:sdb2
ceph-deploy osd activate osd7:sdb1:sdb2 osd7:sdc1:sdc2
ceph-deploy admin osd4 osd5 osd6
ceph-deploy admin osd7
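# Optional check (not shown in the video): all 8 OSDs (one each on osd1-osd6,
# two on osd7) should now report as up and in.
ceph osd tree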
#
# pools
#
# list pools
rados lspools
# create a pool named data01 with pg_num and pgp_num of 256
ceph osd pool create data01 256 256
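# Optional (not shown in the video): inspect or adjust the pool's replication
# size; the value below is an example, not taken from the video.
ceph osd pool get data01 size
# ceph osd pool set data01 size 3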
# create a 4 GB volume (size is given in MB)
rbd create data01/vm_disk01 --size 4096
# list RBD volumes in the pool
rbd list data01
# show volume info
rbd info data01/vm_disk01
#
# client side
#
# map the volume to a local block device
sudo rbd map data01/vm_disk01
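# Note (not shown in the video): on Jewel and later releases the kernel client can
# refuse to map images created with features it does not support ("image uses
# unsupported features"). If the map fails, one workaround is to disable those
# features; the feature names below are examples and depend on the release.
# rbd feature disable data01/vm_disk01 object-map fast-diff deep-flatten exclusive-lock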
# show mapped devices
rbd showmapped
# create a filesystem on the mapped block device and mount it
sudo mkfs.ext4 /dev/rbd1
sudo mount /dev/rbd1 /mnt
# grow the volume to 15 GB and resize the ext4 filesystem to match
rbd resize data01/vm_disk01 --size 15360
sudo resize2fs /dev/rbd1
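# Optional check (not shown in the video): confirm the filesystem now reports
# the larger size.
df -h /mnt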