ceph osd --help
Author : Administrator    Date : 2014-09-22 (Mon) 17:56    Views : 8239
root@ceph:/etc/ceph# ceph osd --help

General usage: 
==============
usage: ceph [-h] [-c CEPHCONF] [-i INPUT_FILE] [-o OUTPUT_FILE]
            [--id CLIENT_ID] [--name CLIENT_NAME] [--cluster CLUSTER]
            [--admin-daemon ADMIN_SOCKET] [--admin-socket ADMIN_SOCKET_NOPE]
            [-s] [-w] [--watch-debug] [--watch-info] [--watch-sec]
            [--watch-warn] [--watch-error] [--version] [--verbose] [--concise]
            [-f {json,json-pretty,xml,xml-pretty,plain}]
            [--connect-timeout CLUSTER_TIMEOUT]

Ceph administration tool

optional arguments:
  -h, --help            request mon help
  -c CEPHCONF, --conf CEPHCONF
                        ceph configuration file
  -i INPUT_FILE, --in-file INPUT_FILE
                        input file
  -o OUTPUT_FILE, --out-file OUTPUT_FILE
                        output file
  --id CLIENT_ID, --user CLIENT_ID
                        client id for authentication
  --name CLIENT_NAME, -n CLIENT_NAME
                        client name for authentication
  --cluster CLUSTER     cluster name
  --admin-daemon ADMIN_SOCKET
                        submit admin-socket commands ("help" for help)
  --admin-socket ADMIN_SOCKET_NOPE
                        you probably mean --admin-daemon
  -s, --status          show cluster status
  -w, --watch           watch live cluster changes
  --watch-debug         watch debug events
  --watch-info          watch info events
  --watch-sec           watch security events
  --watch-warn          watch warn events
  --watch-error         watch error events
  --version, -v         display version
  --verbose             make verbose
  --concise             make less verbose
  -f {json,json-pretty,xml,xml-pretty,plain}, --format {json,json-pretty,xml,xml-pretty,plain}
  --connect-timeout CLUSTER_TIMEOUT
                        set a timeout for connecting to the cluster
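
A few common invocations using the flags above (the output path is an arbitrary example): ceph -s prints a one-shot cluster status, ceph -w watches live cluster changes, and the last command writes the OSD map as pretty-printed JSON to a file.

root@ceph:/etc/ceph# ceph -s
root@ceph:/etc/ceph# ceph -w
root@ceph:/etc/ceph# ceph -f json-pretty -o /tmp/osdmap.json osd dump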

Monitor commands: 
=================
[Contacting monitor, timeout after 5 seconds]
osd blacklist add|rm <EntityAddr>        add (optionally until <expire> seconds 
 {<float[0.0-]>}                          from now) or remove <addr> from 
                                          blacklist
osd blacklist ls                         show blacklisted clients
osd create {<uuid>}                      create new osd (with optional UUID)
osd crush add <osdname (id|osd.id)>      add or update crushmap position and 
 <float[0.0-]> <args> [<args>...]         weight for <name> with <weight> and 
                                          location <args>
osd crush add-bucket <name> <type>       add no-parent (probably root) crush 
                                          bucket <name> of type <type>
osd crush create-or-move <osdname (id|   create entry or move existing entry 
 osd.id)> <float[0.0-]> <args> [<args>..  for <name> <weight> at/to location 
 .]                                       <args>
osd crush dump                           dump crush map
osd crush link <name> <args> [<args>...] link existing entry for <name> under 
                                          location <args>
osd crush move <name> <args> [<args>...] move existing entry for <name> to 
                                          location <args>
osd crush remove <name> {<ancestor>}     remove <name> from crush map (
                                          everywhere, or just at <ancestor>)
osd crush reweight <name> <float[0.0-]>  change <name>'s weight to <weight> in 
                                          crush map
osd crush rm <name> {<ancestor>}         remove <name> from crush map (
                                          everywhere, or just at <ancestor>)
osd crush rule create-erasure <name>     create crush rule <name> for erasure 
 {<profile>}                              coded pool created with <profile> (
                                          default default)
osd crush rule create-simple <name>      create crush rule <name> to start from 
 <root> <type> {firstn|indep}             <root>, replicate across buckets of 
                                          type <type>, using a choose mode of 
                                          <firstn|indep> (default firstn; indep 
                                          best for erasure pools)
osd crush rule dump {<name>}             dump crush rule <name> (default all)
osd crush rule list                      list crush rules
osd crush rule ls                        list crush rules
osd crush rule rm <name>                 remove crush rule <name>
osd crush set                            set crush map from input file
osd crush set <osdname (id|osd.id)>      update crushmap position and weight 
 <float[0.0-]> <args> [<args>...]         for <name> to <weight> with location 
                                          <args>
osd crush show-tunables                  show current crush tunables
osd crush tunables legacy|argonaut|      set crush tunables values to <profile>
 bobtail|firefly|optimal|default         
osd crush unlink <name> {<ancestor>}     unlink <name> from crush map (
                                          everywhere, or just at <ancestor>)
osd deep-scrub <who>                     initiate deep scrub on osd <who>
osd down <ids> [<ids>...]                set osd(s) <id> [<id>...] down
osd dump {<int[0-]>}                     print summary of OSD map
osd erasure-code-profile get <name>      get erasure code profile <name>
osd erasure-code-profile ls              list all erasure code profiles
osd erasure-code-profile rm <name>       remove erasure code profile <name>
osd erasure-code-profile set <name>      create erasure code profile <name> 
 {<profile> [<profile>...]}               with [<key[=value]> ...] pairs. Add a 
                                          --force at the end to override an 
                                          existing profile (VERY DANGEROUS)
osd find <int[0-]>                       find osd <id> in the CRUSH map and 
                                          show its location
osd getcrushmap {<int[0-]>}              get CRUSH map
osd getmap {<int[0-]>}                   get OSD map
osd getmaxosd                            show largest OSD id
osd in <ids> [<ids>...]                  set osd(s) <id> [<id>...] in
osd lost <int[0-]> {--yes-i-really-mean- mark osd as permanently lost. THIS 
 it}                                      DESTROYS DATA IF NO MORE REPLICAS 
                                          EXIST, BE CAREFUL
osd ls {<int[0-]>}                       show all OSD ids
osd lspools {<int>}                      list pools
osd map <poolname> <objectname>          find pg for <object> in <pool>
osd metadata <int[0-]>                   fetch metadata for osd <id>
osd out <ids> [<ids>...]                 set osd(s) <id> [<id>...] out
osd pause                                pause osd
osd perf                                 print dump of OSD perf summary stats
osd pg-temp <pgid> {<id> [<id>...]}      set pg_temp mapping pgid:[<id> [<id>...
                                          ]] (developers only)
osd pool create <poolname> <int[0-]>     create pool
 {<int[0-]>} {replicated|erasure}        
 {<erasure_code_profile>} {<ruleset>}    
osd pool delete <poolname> {<poolname>}  delete pool
 {--yes-i-really-really-mean-it}         
osd pool get <poolname> size|min_size|   get pool parameter <var>
 crash_replay_interval|pg_num|pgp_num|   
 crush_ruleset|hit_set_type|hit_set_     
 period|hit_set_count|hit_set_fpp|auid   
osd pool mksnap <poolname> <snap>        make snapshot <snap> in <pool>
osd pool rename <poolname> <poolname>    rename <srcpool> to <destpool>
osd pool rmsnap <poolname> <snap>        remove snapshot <snap> from <pool>
osd pool set <poolname> size|min_size|   set pool parameter <var> to <val>
 crash_replay_interval|pg_num|pgp_num|   
 crush_ruleset|hashpspool|hit_set_type|  
 hit_set_period|hit_set_count|hit_set_   
 fpp|debug_fake_ec_pool|target_max_      
 bytes|target_max_objects|cache_target_  
 dirty_ratio|cache_target_full_ratio|    
 cache_min_flush_age|cache_min_evict_    
 age|auid <val> {--yes-i-really-mean-it} 
osd pool set-quota <poolname> max_       set object or byte limit on pool
 objects|max_bytes <val>                 
osd pool stats {<name>}                  obtain stats from all pools, or from 
                                          specified pool
osd primary-affinity <osdname (id|osd.   adjust osd primary-affinity from 0.0 <=
 id)> <float[0.0-1.0]>                     <weight> <= 1.0
osd primary-temp <pgid> <id>             set primary_temp mapping pgid:<id>|-1 (
                                          developers only)
osd repair <who>                         initiate repair on osd <who>
osd reweight <int[0-]> <float[0.0-1.0]>  reweight osd to 0.0 < <weight> < 1.0
osd reweight-by-utilization {<int[100-   reweight OSDs by utilization [overload-
 ]>}                                      percentage-for-consideration, default 
                                          120]
osd rm <ids> [<ids>...]                  remove osd(s) <id> [<id>...]
osd scrub <who>                          initiate scrub on osd <who>
osd set pause|noup|nodown|noout|noin|    set <key>
 nobackfill|norecover|noscrub|nodeep-    
 scrub|notieragent                       
osd setcrushmap                          set crush map from input file
osd setmaxosd <int[0-]>                  set new maximum osd value
osd stat                                 print summary of OSD map
osd thrash <int[0-]>                     thrash OSDs for <num_epochs>
osd tier add <poolname> <poolname> {--   add the tier <tierpool> (the second 
 force-nonempty}                          one) to base pool <pool> (the first 
                                          one)
osd tier add-cache <poolname>            add a cache <tierpool> (the second one)
 <poolname> <int[0-]>                     of size <size> to existing pool 
                                          <pool> (the first one)
osd tier cache-mode <poolname> none|     specify the caching mode for cache 
 writeback|forward|readonly               tier <pool>
osd tier remove <poolname> <poolname>    remove the tier <tierpool> (the second 
                                          one) from base pool <pool> (the first 
                                          one)
osd tier remove-overlay <poolname>       remove the overlay pool for base pool 
                                          <pool>
osd tier set-overlay <poolname>          set the overlay pool for base pool 
 <poolname>                               <pool> to be <overlaypool>
osd tree {<int[0-]>}                     print OSD tree
osd unpause                              unpause osd
osd unset pause|noup|nodown|noout|noin|  unset <key>
 nobackfill|norecover|noscrub|nodeep-    
 scrub|notieragent                       
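
Worked examples:
================
The OSD ids, pool names, and bucket names below are hypothetical; the commands themselves are taken from the table above.

Inspecting the layout: print the OSD tree, locate osd.3 in the CRUSH map, and show which placement group the object "someobject" in pool "testpool" maps to.

root@ceph:/etc/ceph# ceph osd tree
root@ceph:/etc/ceph# ceph osd find 3
root@ceph:/etc/ceph# ceph osd map testpool someobject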
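
Quieting the cluster during maintenance: set the noout flag so OSDs are not marked out while a host reboots, and clear it afterwards.

root@ceph:/etc/ceph# ceph osd set noout
root@ceph:/etc/ceph# ceph osd unset noout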
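
Retiring a failed OSD: take it out of data placement so recovery starts, mark it down, then delete it from the CRUSH map and the OSD map. Stop the daemon on its host first; removing its auth key is done with ceph auth, which is outside this table.

root@ceph:/etc/ceph# ceph osd out 3
root@ceph:/etc/ceph# ceph osd down 3
root@ceph:/etc/ceph# ceph osd crush remove osd.3
root@ceph:/etc/ceph# ceph osd rm 3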
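
Creating and tuning a replicated pool: 128 placement groups (pg_num and pgp_num), three replicas, and a 10 GiB byte quota.

root@ceph:/etc/ceph# ceph osd pool create testpool 128 128 replicated
root@ceph:/etc/ceph# ceph osd pool set testpool size 3
root@ceph:/etc/ceph# ceph osd pool set-quota testpool max_bytes 10737418240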
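
Shaping the CRUSH hierarchy: add a rack bucket, move it under the default root, then place osd.5 under a host in that rack with weight 1.0. The "rack" and "host" types assume the default CRUSH type hierarchy.

root@ceph:/etc/ceph# ceph osd crush add-bucket rack1 rack
root@ceph:/etc/ceph# ceph osd crush move rack1 root=default
root@ceph:/etc/ceph# ceph osd crush create-or-move osd.5 1.0 root=default rack=rack1 host=node1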
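
Setting up a writeback cache tier: attach "cachepool" in front of "basepool", choose the caching mode, and route client traffic through the cache.

root@ceph:/etc/ceph# ceph osd tier add basepool cachepool
root@ceph:/etc/ceph# ceph osd tier cache-mode cachepool writeback
root@ceph:/etc/ceph# ceph osd tier set-overlay basepool cachepool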
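
Checking data consistency: initiate a deep scrub on osd.2 and, if inconsistencies are reported, a repair.

root@ceph:/etc/ceph# ceph osd deep-scrub 2
root@ceph:/etc/ceph# ceph osd repair 2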

