ceph-deploy activate err
Author : Administrator    Date : 2016-01-20 (Wed) 14:43    Views : 4764
Reference : http://blog.csdn.net/bobpen/article/details/40424069
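
Summary: on an Ubuntu 14.04 cluster, `ceph-deploy osd activate` fails with "No cluster conf found in /etc/ceph with fsid ...". The cluster uuid stored on the OSD data partition does not match any conf under /etc/ceph on the OSD host, most likely because the partition was prepared under an earlier deployment of the cluster. The fix below removes the stale OSD identity and re-runs prepare/activate so the partition is re-created under the current fsid.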


ceph@mgmt:~/ceph$ ceph-deploy osd activate osd-1:sdb1:/dev/sda1
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.31): /usr/bin/ceph-deploy osd activate osd-1:sdb1:/dev/sda1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : activate
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7fb00b126f80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7fb00b0fd500>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  disk                          : [('osd-1', '/dev/sdb1', '/dev/sda1')]
[ceph_deploy.osd][DEBUG ] Activating cluster ceph disks osd-1:/dev/sdb1:/dev/sda1
[osd-1][DEBUG ] connection detected need for sudo
[osd-1][DEBUG ] connected to host: osd-1 
[osd-1][DEBUG ] detect platform information from remote host
[osd-1][DEBUG ] detect machine type
[osd-1][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: Ubuntu 14.04 trusty
[ceph_deploy.osd][DEBUG ] activating host osd-1 disk /dev/sdb1
[ceph_deploy.osd][DEBUG ] will use init type: upstart
[osd-1][INFO  ] Running command: sudo ceph-disk -v activate --mark-init upstart --mount /dev/sdb1
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[osd-1][WARNIN] INFO:ceph-disk:Running command: /sbin/sgdisk -i 1 /dev/sdb
[osd-1][WARNIN] INFO:ceph-disk:Running command: /sbin/blkid -p -s TYPE -ovalue -- /dev/sdb1
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
[osd-1][WARNIN] DEBUG:ceph-disk:Mounting /dev/sdb1 on /var/lib/ceph/tmp/mnt.nIApwz with options noatime,inode64
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/mount -t xfs -o noatime,inode64 -- /dev/sdb1 /var/lib/ceph/tmp/mnt.nIApwz
[osd-1][WARNIN] DEBUG:ceph-disk:Cluster uuid is a309536a-7f09-4cb2-a61f-1634484a249c
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[osd-1][WARNIN] ERROR:ceph-disk:Failed to activate
[osd-1][WARNIN] DEBUG:ceph-disk:Unmounting /var/lib/ceph/tmp/mnt.nIApwz
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.nIApwz
[osd-1][WARNIN] Traceback (most recent call last):
[osd-1][WARNIN]   File "/usr/sbin/ceph-disk", line 3576, in <module>
[osd-1][WARNIN]     main(sys.argv[1:])
[osd-1][WARNIN]   File "/usr/sbin/ceph-disk", line 3530, in main
[osd-1][WARNIN]     args.func(args)
[osd-1][WARNIN]   File "/usr/sbin/ceph-disk", line 2424, in main_activate
[osd-1][WARNIN]     dmcrypt_key_dir=args.dmcrypt_key_dir,
[osd-1][WARNIN]   File "/usr/sbin/ceph-disk", line 2197, in mount_activate
[osd-1][WARNIN]     (osd_id, cluster) = activate(path, activate_key_template, init)
[osd-1][WARNIN]   File "/usr/sbin/ceph-disk", line 2331, in activate
[osd-1][WARNIN]     raise Error('No cluster conf found in ' + SYSCONFDIR + ' with fsid %s' % ceph_fsid)
[osd-1][WARNIN] __main__.Error: Error: No cluster conf found in /etc/ceph with fsid a309536a-7f09-4cb2-a61f-1634484a249c
[osd-1][ERROR ] RuntimeError: command returned non-zero exit status: 1
[ceph_deploy][ERROR ] RuntimeError: Failed to execute command: ceph-disk -v activate --mark-init upstart --mount /dev/sdb1
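
What happened: ceph-disk mounted /dev/sdb1, read the cluster uuid a309536a-7f09-4cb2-a61f-1634484a249c from the ceph_fsid file in the OSD data directory, and found no conf in /etc/ceph on osd-1 carrying that fsid, so it unmounted and bailed out. A quick way to confirm the mismatch by hand, assuming the standard ceph-disk on-disk layout (the /mnt mount point is arbitrary):

ceph@osd-1:~$ sudo mount /dev/sdb1 /mnt
ceph@osd-1:~$ cat /mnt/ceph_fsid               # fsid the partition was prepared with
ceph@osd-1:~$ grep fsid /etc/ceph/ceph.conf    # fsid the local conf expects
ceph@osd-1:~$ sudo umount /mnt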


ceph@mgmt:~/ceph$ ceph osd tree
ID WEIGHT TYPE NAME    UP/DOWN REWEIGHT PRIMARY-AFFINITY 
-1      0 root default                                   
 0      0 osd.0           down        0          1.00000 
 1      0 osd.1           down        0          1.00000 
 2      0 osd.2           down        0          1.00000 
 3      0 osd.3           down        0          1.00000 
 4      0 osd.4           down        0          1.00000 
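
All five OSDs are down with weight 0, i.e. none of them ever finished activation. osd.1 is cleared out of the cluster first so its id can be reallocated: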

ceph@mgmt:~/ceph$ ceph auth del osd.1
updated

ceph@mgmt:~/ceph$ ceph osd rm 1
removed osd.1
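
Note: `ceph auth del` drops the OSD's cephx key and `ceph osd rm` frees the id, so a fresh activate can claim osd.1 again. On a cluster where the OSD had already been linked into the CRUSH map with a real weight, it may also need to be unlinked, e.g.:

ceph@mgmt:~/ceph$ ceph osd crush remove osd.1

Here osd.1 still has weight 0 straight under the root bucket, so that step was not needed.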

ceph@mgmt:~/ceph$ ceph osd tree
ID WEIGHT TYPE NAME    UP/DOWN REWEIGHT PRIMARY-AFFINITY 
-1      0 root default                                   
 0      0 osd.0           down        0          1.00000 
 2      0 osd.2           down        0          1.00000 
 3      0 osd.3           down        0          1.00000 
 4      0 osd.4           down        0          1.00000 


ceph@mgmt:~/ceph$ ceph-deploy osd prepare osd-1:sdb1:sda1
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.31): /usr/bin/ceph-deploy osd prepare osd-1:sdb1:sda1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  disk                          : [('osd-1', '/dev/sdb1', '/dev/sda1')]
[ceph_deploy.cli][INFO  ]  dmcrypt                       : False
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : prepare
[ceph_deploy.cli][INFO  ]  dmcrypt_key_dir               : /etc/ceph/dmcrypt-keys
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f5f07708f80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  fs_type                       : xfs
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7f5f076df500>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  zap_disk                      : False
[ceph_deploy.osd][DEBUG ] Preparing cluster ceph disks osd-1:/dev/sdb1:/dev/sda1
[osd-1][DEBUG ] connection detected need for sudo
[osd-1][DEBUG ] connected to host: osd-1 
[osd-1][DEBUG ] detect platform information from remote host
[osd-1][DEBUG ] detect machine type
[osd-1][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: Ubuntu 14.04 trusty
[ceph_deploy.osd][DEBUG ] Deploying osd to osd-1
[osd-1][DEBUG ] write cluster configuration to /etc/ceph/{cluster}.conf
[ceph_deploy.osd][DEBUG ] Preparing host osd-1 disk /dev/sdb1 journal /dev/sda1 activate False
[osd-1][INFO  ] Running command: sudo ceph-disk -v prepare --cluster ceph --fs-type xfs -- /dev/sdb1 /dev/sda1
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --check-allows-journal -i 0 --cluster ceph
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --check-wants-journal -i 0 --cluster ceph
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --check-needs-journal -i 0 --cluster ceph
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sda1 uuid path is /sys/dev/block/8:1/dm/uuid
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mkfs_options_xfs
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=osd_journal_size
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_cryptsetup_parameters
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_key_size
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_type
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sda1 uuid path is /sys/dev/block/8:1/dm/uuid
[osd-1][WARNIN] DEBUG:ceph-disk:Journal /dev/sda1 is a partition
[osd-1][WARNIN] WARNING:ceph-disk:OSD will not be hot-swappable if journal is not the same device as the osd data
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sda1 uuid path is /sys/dev/block/8:1/dm/uuid
[osd-1][WARNIN] INFO:ceph-disk:Running command: /sbin/sgdisk -i 1 /dev/sda
[osd-1][WARNIN] DEBUG:ceph-disk:Journal /dev/sda1 was previously prepared with ceph-disk. Reusing it.
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sda1 uuid path is /sys/dev/block/8:1/dm/uuid
[osd-1][WARNIN] INFO:ceph-disk:Running command: /sbin/sgdisk -i 1 /dev/sda
[osd-1][WARNIN] DEBUG:ceph-disk:Reusing journal with uuid b5248e22-0fab-464b-9103-a8be926e0888
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[osd-1][WARNIN] DEBUG:ceph-disk:OSD data device /dev/sdb1 is a partition
[osd-1][WARNIN] DEBUG:ceph-disk:Creating xfs fs on /dev/sdb1
[osd-1][WARNIN] INFO:ceph-disk:Running command: /sbin/mkfs -t xfs -f -i size=2048 -- /dev/sdb1
[osd-1][DEBUG ] meta-data=/dev/sdb1              isize=2048   agcount=4, agsize=23340991 blks
[osd-1][DEBUG ]          =                       sectsz=512   attr=2, projid32bit=0
[osd-1][DEBUG ] data     =                       bsize=4096   blocks=93363963, imaxpct=25
[osd-1][DEBUG ]          =                       sunit=0      swidth=0 blks
[osd-1][DEBUG ] naming   =version 2              bsize=4096   ascii-ci=0
[osd-1][DEBUG ] log      =internal log           bsize=4096   blocks=45587, version=2
[osd-1][DEBUG ]          =                       sectsz=512   sunit=0 blks, lazy-count=1
[osd-1][DEBUG ] realtime =none                   extsz=4096   blocks=0, rtextents=0
[osd-1][WARNIN] DEBUG:ceph-disk:Mounting /dev/sdb1 on /var/lib/ceph/tmp/mnt.yqRj7Z with options noatime,inode64
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/mount -t xfs -o noatime,inode64 -- /dev/sdb1 /var/lib/ceph/tmp/mnt.yqRj7Z
[osd-1][WARNIN] DEBUG:ceph-disk:Preparing osd data dir /var/lib/ceph/tmp/mnt.yqRj7Z
[osd-1][WARNIN] DEBUG:ceph-disk:Creating symlink /var/lib/ceph/tmp/mnt.yqRj7Z/journal -> /dev/disk/by-partuuid/b5248e22-0fab-464b-9103-a8be926e0888
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.yqRj7Z/ceph_fsid.4217.tmp
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.yqRj7Z/fsid.4217.tmp
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.yqRj7Z/journal_uuid.4217.tmp
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.yqRj7Z/magic.4217.tmp
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.yqRj7Z
[osd-1][WARNIN] DEBUG:ceph-disk:Unmounting /var/lib/ceph/tmp/mnt.yqRj7Z
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.yqRj7Z
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[osd-1][INFO  ] checking OSD status...
[osd-1][INFO  ] Running command: sudo ceph --cluster=ceph osd stat --format=json
[osd-1][WARNIN] there are 4 OSDs down
[osd-1][WARNIN] there are 4 OSDs out
[ceph_deploy.osd][DEBUG ] Host osd-1 is now ready for osd use.
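
The prepare step wrote a fresh conf to osd-1 ("write cluster configuration to /etc/ceph/{cluster}.conf" above) and re-created the XFS filesystem on /dev/sdb1, so the on-disk fsid and the local conf now agree; the journal partition /dev/sda1 was detected as previously ceph-disk-prepared and reused. If only the conf on the host were stale (and the data partition worth keeping), pushing the current conf would be a lighter alternative than a full re-prepare, e.g.:

ceph@mgmt:~/ceph$ ceph-deploy --overwrite-conf config push osd-1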


ceph@mgmt:~/ceph$ ceph-deploy osd activate osd-1:sdb1:sda1
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/ceph/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.31): /usr/bin/ceph-deploy osd activate osd-1:sdb1:sda1
[ceph_deploy.cli][INFO  ] ceph-deploy options:
[ceph_deploy.cli][INFO  ]  username                      : None
[ceph_deploy.cli][INFO  ]  verbose                       : False
[ceph_deploy.cli][INFO  ]  overwrite_conf                : False
[ceph_deploy.cli][INFO  ]  subcommand                    : activate
[ceph_deploy.cli][INFO  ]  quiet                         : False
[ceph_deploy.cli][INFO  ]  cd_conf                       : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7f76047cdf80>
[ceph_deploy.cli][INFO  ]  cluster                       : ceph
[ceph_deploy.cli][INFO  ]  func                          : <function osd at 0x7f76047a4500>
[ceph_deploy.cli][INFO  ]  ceph_conf                     : None
[ceph_deploy.cli][INFO  ]  default_release               : False
[ceph_deploy.cli][INFO  ]  disk                          : [('osd-1', '/dev/sdb1', '/dev/sda1')]
[ceph_deploy.osd][DEBUG ] Activating cluster ceph disks osd-1:/dev/sdb1:/dev/sda1
[osd-1][DEBUG ] connection detected need for sudo
[osd-1][DEBUG ] connected to host: osd-1 
[osd-1][DEBUG ] detect platform information from remote host
[osd-1][DEBUG ] detect machine type
[osd-1][DEBUG ] find the location of an executable
[ceph_deploy.osd][INFO  ] Distro info: Ubuntu 14.04 trusty
[ceph_deploy.osd][DEBUG ] activating host osd-1 disk /dev/sdb1
[ceph_deploy.osd][DEBUG ] will use init type: upstart
[osd-1][INFO  ] Running command: sudo ceph-disk -v activate --mark-init upstart --mount /dev/sdb1
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[osd-1][WARNIN] DEBUG:ceph-disk:get_dm_uuid /dev/sdb1 uuid path is /sys/dev/block/8:17/dm/uuid
[osd-1][WARNIN] INFO:ceph-disk:Running command: /sbin/sgdisk -i 1 /dev/sdb
[osd-1][WARNIN] INFO:ceph-disk:Running command: /sbin/blkid -p -s TYPE -ovalue -- /dev/sdb1
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
[osd-1][WARNIN] DEBUG:ceph-disk:Mounting /dev/sdb1 on /var/lib/ceph/tmp/mnt.3z5fzh with options noatime,inode64
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/mount -t xfs -o noatime,inode64 -- /dev/sdb1 /var/lib/ceph/tmp/mnt.3z5fzh
[osd-1][WARNIN] DEBUG:ceph-disk:Cluster uuid is 7e0770b6-1f29-4e3e-91da-1a23f604f61d
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[osd-1][WARNIN] DEBUG:ceph-disk:Cluster name is ceph
[osd-1][WARNIN] DEBUG:ceph-disk:OSD uuid is ea63db8a-8694-45ea-8435-35a9cd063c61
[osd-1][WARNIN] DEBUG:ceph-disk:Allocating OSD id...
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd create --concise ea63db8a-8694-45ea-8435-35a9cd063c61
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.3z5fzh/whoami.4409.tmp
[osd-1][WARNIN] DEBUG:ceph-disk:OSD id is 1
[osd-1][WARNIN] DEBUG:ceph-disk:Initializing OSD...
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/tmp/mnt.3z5fzh/activate.monmap
[osd-1][WARNIN] got monmap epoch 1
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster ceph --mkfs --mkkey -i 1 --monmap /var/lib/ceph/tmp/mnt.3z5fzh/activate.monmap --osd-data /var/lib/ceph/tmp/mnt.3z5fzh --osd-journal /var/lib/ceph/tmp/mnt.3z5fzh/journal --osd-uuid ea63db8a-8694-45ea-8435-35a9cd063c61 --keyring /var/lib/ceph/tmp/mnt.3z5fzh/keyring --setuser ceph --setgroup ceph
[osd-1][WARNIN] 2016-01-20 14:39:08.843568 7f979f0ed940 -1 journal check: ondisk fsid 69e651e1-9842-4eda-9140-c4edae7e27fe doesn't match expected ea63db8a-8694-45ea-8435-35a9cd063c61, invalid (someone else's?) journal
[osd-1][WARNIN] 2016-01-20 14:39:08.845284 7f979f0ed940 -1 filestore(/var/lib/ceph/tmp/mnt.3z5fzh) could not find -1/23c2fcde/osd_superblock/0 in index: (2) No such file or directory
[osd-1][WARNIN] 2016-01-20 14:39:08.847145 7f979f0ed940 -1 created object store /var/lib/ceph/tmp/mnt.3z5fzh journal /var/lib/ceph/tmp/mnt.3z5fzh/journal for osd.1 fsid a309536a-7f09-4cb2-a61f-1634484a249c
[osd-1][WARNIN] 2016-01-20 14:39:08.847163 7f979f0ed940 -1 auth: error reading file: /var/lib/ceph/tmp/mnt.3z5fzh/keyring: can't open /var/lib/ceph/tmp/mnt.3z5fzh/keyring: (2) No such file or directory
[osd-1][WARNIN] 2016-01-20 14:39:08.847279 7f979f0ed940 -1 created new key in keyring /var/lib/ceph/tmp/mnt.3z5fzh/keyring
[osd-1][WARNIN] DEBUG:ceph-disk:Marking with init system upstart
[osd-1][WARNIN] DEBUG:ceph-disk:Authorizing OSD key...
[osd-1][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring auth add osd.1 -i /var/lib/ceph/tmp/mnt.3z5fzh/keyring osd allow * mon allow profile osd
[osd-1][WARNIN] added key for osd.1
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/chown -R ceph:ceph /var/lib/ceph/tmp/mnt.3z5fzh/active.4409.tmp
[osd-1][WARNIN] DEBUG:ceph-disk:ceph osd.1 data dir is ready at /var/lib/ceph/tmp/mnt.3z5fzh
[osd-1][WARNIN] DEBUG:ceph-disk:Moving mount to final location...
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/mount -o noatime,inode64 -- /dev/sdb1 /var/lib/ceph/osd/ceph-1
[osd-1][WARNIN] INFO:ceph-disk:Running command: /bin/umount -l -- /var/lib/ceph/tmp/mnt.3z5fzh
[osd-1][WARNIN] DEBUG:ceph-disk:Starting ceph osd.1...
[osd-1][WARNIN] INFO:ceph-disk:Running command: /sbin/initctl emit --no-wait -- ceph-osd cluster=ceph id=1
[osd-1][INFO  ] checking OSD status...
[osd-1][INFO  ] Running command: sudo ceph --cluster=ceph osd stat --format=json
[osd-1][WARNIN] there are 4 OSDs down
[osd-1][WARNIN] there are 4 OSDs out
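
This time activation goes through: ceph-disk allocates id 1, runs `ceph-osd --mkfs`, registers the key with the monitors, moves the mount to /var/lib/ceph/osd/ceph-1 and starts the daemon via upstart. The 14:39:08 warnings are expected here: the reused journal still carried the fsid of the previous OSD, so --mkfs reports "invalid (someone else's?) journal" and re-initializes it, and the keyring message only says one had to be created because none existed yet. The "4 OSDs down" lines refer to the remaining unfixed OSDs.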


ceph@mgmt:~/ceph$ ceph osd tree
ID WEIGHT  TYPE NAME      UP/DOWN REWEIGHT PRIMARY-AFFINITY 
-1 0.34760 root default                                     
-2 0.34760     host osd-1                                   
 1 0.34760         osd.1       up  1.00000          1.00000 
 0       0 osd.0             down        0          1.00000 
 2       0 osd.2             down        0          1.00000 
 3       0 osd.3             down        0          1.00000 
 4       0 osd.4             down        0          1.00000 

