1. Edit /etc/hosts
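Every node should be able to resolve the new host by name. A minimal sketch of the entries, using the monitor addresses shown in the ceph -s output below; the osd3 address is a placeholder:
115.68.200.60   mon0
115.68.200.61   mon1
115.68.200.62   mon2
<osd3-ip>       osd3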
2. Edit hostname
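On the new node itself, assuming Ubuntu (as the df output below suggests), something like:
root@osd3:~# echo "osd3" > /etc/hostname
root@osd3:~# hostname osd3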
3. Create the ceph user
# useradd -d /home/ceph -m ceph
# passwd ceph
# echo "ceph ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/ceph
# chmod 0440 /etc/sudoers.d/ceph
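Optional sanity check that passwordless sudo works for the ceph user (should print root without asking for a password):
root@osd3:~# su - ceph -c "sudo whoami"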
4. Copy SSH key
ceph@mgmt:~$ ssh-copy-id ceph@osd3
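This assumes a key pair already exists on mgmt (generated with ssh-keygen). Passwordless login can then be verified with:
ceph@mgmt:~$ ssh ceph@osd3 hostname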
5. Install Ceph
ceph@mgmt:~$ ceph-deploy install osd3
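To keep the new node on the same Ceph release as the existing OSDs, the release can be pinned explicitly; hammer below is only an example:
ceph@mgmt:~$ ceph-deploy install --release hammer osd3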
ceph@mgmt:~/cephcluster$ ceph-deploy admin osd3
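This pushes ceph.conf and the client.admin keyring to osd3. If ceph commands on the node later fail with a keyring permission error, a common fix is:
root@osd3:~# chmod +r /etc/ceph/ceph.client.admin.keyring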
ceph@mgmt:~/cephcluster$ ceph-deploy disk zap osd3:sda
ceph@mgmt:~/cephcluster$ ceph-deploy osd prepare osd3:sda
ceph@mgmt:~/cephcluster$ ceph-deploy osd activate osd3:sda1
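disk zap wipes the partition table on sda, and prepare then repartitions the disk (data on sda1, plus a journal partition by default), which is why activate targets sda1. The result can be checked on the node:
root@osd3:~# lsblk /dev/sda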
ceph@mgmt:~/cephcluster$ ceph -s
    cluster 38e6bdde-e19e-4801-8dc0-0e7a47734611
     health HEALTH_WARN too few pgs per osd (16 < min 20)
     monmap e1: 3 mons at {mon0=115.68.200.60:6789/0,mon1=115.68.200.61:6789/0,mon2=115.68.200.62:6789/0}, election epoch 8, quorum 0,1,2 mon0,mon1,mon2
     osdmap e19: 4 osds: 4 up, 4 in
      pgmap v38: 64 pgs, 1 pools, 0 bytes data, 0 objects
            142 MB used, 67027 GB / 67027 GB avail
                  64 active+clean
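The HEALTH_WARN is about placement-group count, not the new OSD. Raising pg_num/pgp_num on the pool clears it; the pool name rbd and the value 256 below are assumptions, pick values that fit the cluster:
ceph@mgmt:~/cephcluster$ ceph osd pool set rbd pg_num 256
ceph@mgmt:~/cephcluster$ ceph osd pool set rbd pgp_num 256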
root@osd3:~# df -Th
Filesystem  Type      Size  Used  Avail Use% Mounted on
/dev/sdb1   ext4       15G  2.4G    12G  18% /
none        tmpfs     4.0K     0   4.0K   0% /sys/fs/cgroup
udev        devtmpfs  7.7G  4.0K   7.7G   1% /dev
tmpfs       tmpfs     1.6G  480K   1.6G   1% /run
none        tmpfs     5.0M     0   5.0M   0% /run/lock
none        tmpfs     7.7G  8.0K   7.7G   1% /run/shm
none        tmpfs     100M     0   100M   0% /run/user
/dev/sda1   xfs        17T   36M    17T   1% /var/lib/ceph/osd/ceph-3
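The xfs mount on /var/lib/ceph/osd/ceph-3 confirms the new OSD's data partition is in place; its placement in the CRUSH map can be checked from the admin node with:
ceph@mgmt:~/cephcluster$ ceph osd tree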