ソースを参照

HADOOP-9381. Document dfs cp -f option. Contributed by Keegan Witt and Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514089 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 11 年 前
コミット
0cb5f08149

+ 2 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -389,6 +389,8 @@ Release 2.1.1-beta - UNRELEASED
     HADOOP-9857. Tests block and sometimes timeout on Windows due to invalid
     entropy source. (cnauroth)
 
+    HADOOP-9381. Document dfs cp -f option. (Keegan Witt, suresh via suresh)
+
 Release 2.1.0-beta - 2013-08-06
 
   INCOMPATIBLE CHANGES

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -133,7 +133,8 @@ class CopyCommands {
       "Copy files that match the file pattern <src> to a\n" +
       "destination.  When copying multiple files, the destination\n" +
       "must be a directory. Passing -p preserves access and\n" +
-      "modification times, ownership and the mode.\n";
+      "modification times, ownership and the mode. Passing -f\n" +
+      "overwrites the destination if it already exists.\n";
     
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
@@ -186,7 +187,8 @@ class CopyCommands {
       "into fs. Copying fails if the file already\n" +
       "exists, unless the -f flag is given. Passing\n" +
       "-p preserves access and modification times,\n" +
-      "ownership and the mode.\n";
+      "ownership and the mode. Passing -f overwrites\n" +
+      "the destination if it already exists.\n";
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {

+ 42 - 19
hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm

@@ -86,11 +86,14 @@ chgrp
 
    Usage: <<<hdfs dfs -chgrp [-R] GROUP URI [URI ...]>>>
 
-   Change group association of files. With -R, make the change recursively
-   through the directory structure. The user must be the owner of files, or
+   Change group association of files. The user must be the owner of files, or
    else a super-user. Additional information is in the
    {{{betterurl}Permissions Guide}}.
 
+   Options
+
+     * The -R option will make the change recursively through the directory structure.
+
 chmod
 
    Usage: <<<hdfs dfs -chmod [-R] <MODE[,MODE]... | OCTALMODE> URI [URI ...]>>>
@@ -100,14 +103,21 @@ chmod
    else a super-user. Additional information is in the
    {{{betterurl}Permissions Guide}}.
 
+   Options
+
+     * The -R option will make the change recursively through the directory structure.
+
 chown
 
    Usage: <<<hdfs dfs -chown [-R] [OWNER][:[GROUP]] URI [URI ]>>>
 
-   Change the owner of files. With -R, make the change recursively through the
-   directory structure. The user must be a super-user. Additional information
+   Change the owner of files. The user must be a super-user. Additional information
    is in the {{{betterurl}Permissions Guide}}.
 
+   Options
+
+     * The -R option will make the change recursively through the directory structure.
+
 copyFromLocal
 
    Usage: <<<hdfs dfs -copyFromLocal <localsrc> URI>>>
@@ -115,6 +125,10 @@ copyFromLocal
    Similar to put command, except that the source is restricted to a local
    file reference.
 
+   Options:
+
+     * The -f option will overwrite the destination if it already exists.
+
 copyToLocal
 
    Usage: <<<hdfs dfs -copyToLocal [-ignorecrc] [-crc] URI <localdst> >>>
@@ -145,11 +159,15 @@ count
 
 cp
 
-   Usage: <<<hdfs dfs -cp URI [URI ...] <dest> >>>
+   Usage: <<<hdfs dfs -cp [-f] URI [URI ...] <dest> >>>
 
    Copy files from source to destination. This command allows multiple sources
    as well in which case the destination must be a directory.
 
+    Options:
+
+      * The -f option will overwrite the destination if it already exists.
+
    Example:
 
      * <<<hdfs dfs -cp /user/hadoop/file1 /user/hadoop/file2>>>
@@ -232,7 +250,7 @@ ls
 permissions number_of_replicas userid groupid filesize modification_date modification_time filename
 +---+
 
-   For a directory it returns list of its direct children as in unix.A directory is listed as:
+   For a directory it returns list of its direct children as in Unix. A directory is listed as:
 
 +---+
 permissions userid groupid modification_date modification_time dirname
@@ -256,8 +274,11 @@ mkdir
 
    Usage: <<<hdfs dfs -mkdir [-p] <paths> >>>
 
-   Takes path uri's as argument and creates directories.  With -p the behavior
-   is much like unix mkdir -p creating parent directories along the path.
+   Takes path uri's as argument and creates directories.
+
+   Options:
+
+     * The -p option behavior is much like Unix mkdir -p, creating parent directories along the path.
 
    Example:
 
@@ -362,8 +383,11 @@ setrep
 
    Usage: <<<hdfs dfs -setrep [-R] <path> >>>
 
-   Changes the replication factor of a file. -R option is for recursively
-   increasing the replication factor of files within a directory.
+   Changes the replication factor of a file.
+
+   Options:
+
+     * The -R option will recursively increase the replication factor of files within a directory.
 
    Example:
 
@@ -390,8 +414,11 @@ tail
 
    Usage: <<<hdfs dfs -tail [-f] URI>>>
 
-   Displays last kilobyte of the file to stdout. -f option can be used as in
-   Unix.
+   Displays last kilobyte of the file to stdout.
+
+   Options:
+
+     * The -f option will output appended data as the file grows, as in Unix.
 
    Example:
 
@@ -406,13 +433,9 @@ test
 
    Options:
 
-*----+------------+
-| -e | check to see if the file exists. Return 0 if true.
-*----+------------+
-| -z | check to see if the file is zero length. Return 0 if true.
-*----+------------+
-| -d | check to see if the path is directory. Return 0 if true.
-*----+------------+
+     * The -e option will check to see if the file exists, returning 0 if true.
+     * The -z option will check to see if the file is zero length, returning 0 if true.
+     * The -d option will check to see if the path is directory, returning 0 if true.
 
    Example:
 

+ 10 - 2
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -296,7 +296,11 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*modification times, ownership and the mode.( )*</expected-output>
+          <expected-output>^( |\t)*modification times, ownership and the mode. Passing -f( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*overwrites the destination if it already exists.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -400,7 +404,11 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*ownership and the mode.( )*</expected-output>
+          <expected-output>^( |\t)*ownership and the mode. Passing -f overwrites( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*the destination if it already exists.( )*</expected-output>
         </comparator>
       </comparators>
     </test>