
HADOOP-12358. Add -safely flag to rm to prompt when deleting many files. Contributed by Xiaoyu Yao.

Andrew Wang 10 years ago
parent
commit
e1feaf6db0
18 changed files with 275 additions and 35 deletions
  1. +3 -0
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. +6 -0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  3. +48 -13
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
  4. +11 -0
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  5. +1 -1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java
  6. +4 -1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java
  7. +4 -2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java
  8. +3 -3
      hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
  9. +5 -3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java
  10. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java
  11. +4 -3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java
  12. +3 -3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java
  13. +92 -0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java
  14. +2 -2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
  15. +1 -1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java
  16. +2 -1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
  17. +83 -0
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testDeleteConf.xml
  18. +2 -1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/cli/CLITestCmdMR.java

+ 3 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -765,6 +765,9 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf
     within target folder. (wang)
 
+    HADOOP-12358. Add -safely flag to rm to prompt when deleting many files.
+    (xyao via wang)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -381,5 +381,11 @@ public class CommonConfigurationKeysPublic {
       "hadoop.shell.missing.defaultFs.warning";
   public static final boolean HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT =
       false;
+
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES =
+      "hadoop.shell.safely.delete.limit.num.files";
+  public static final long HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT =
+      100;
 }
 

+ 48 - 13
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java

@@ -25,6 +25,7 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.fs.PathIsDirectoryException;
@@ -32,9 +33,13 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.util.ToolRunner;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT;
 
 /**
- * Classes that delete paths
+ * Classes that delete paths.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
@@ -50,28 +55,36 @@ class Delete {
   /** remove non-directory paths */
   public static class Rm extends FsCommand {
     public static final String NAME = "rm";
-    public static final String USAGE = "[-f] [-r|-R] [-skipTrash] <src> ...";
+    public static final String USAGE = "[-f] [-r|-R] [-skipTrash] " +
+        "[-safely] <src> ...";
     public static final String DESCRIPTION =
-      "Delete all files that match the specified file pattern. " +
-      "Equivalent to the Unix command \"rm <src>\"\n" +
-      "-skipTrash: option bypasses trash, if enabled, and immediately " +
-      "deletes <src>\n" +
-      "-f: If the file does not exist, do not display a diagnostic " +
-      "message or modify the exit status to reflect an error.\n" +
-      "-[rR]:  Recursively deletes directories";
+        "Delete all files that match the specified file pattern. " +
+            "Equivalent to the Unix command \"rm <src>\"\n" +
+            "-f: If the file does not exist, do not display a diagnostic " +
+            "message or modify the exit status to reflect an error.\n" +
+            "-[rR]:  Recursively deletes directories.\n" +
+            "-skipTrash: option bypasses trash, if enabled, and immediately " +
+            "deletes <src>.\n" +
+            "-safely: option requires safety confirmation,if enabled, " +
+            "requires confirmation before deleting large directory with more " +
+            "than <hadoop.shell.delete.limit.num.files> files. Delay is " +
+            "expected when walking over large directory recursively to count " +
+            "the number of files to be deleted before the confirmation.\n";
 
     private boolean skipTrash = false;
     private boolean deleteDirs = false;
     private boolean ignoreFNF = false;
-    
+    private boolean safeDelete = false;
+
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
       CommandFormat cf = new CommandFormat(
-          1, Integer.MAX_VALUE, "f", "r", "R", "skipTrash");
+          1, Integer.MAX_VALUE, "f", "r", "R", "skipTrash", "safely");
       cf.parse(args);
       ignoreFNF = cf.getOpt("f");
       deleteDirs = cf.getOpt("r") || cf.getOpt("R");
       skipTrash = cf.getOpt("skipTrash");
+      safeDelete = cf.getOpt("safely");
     }
 
     @Override
@@ -102,7 +115,7 @@ class Delete {
       // problem (ie. creating the trash dir, moving the item to be deleted,
       // etc), then the path will just be deleted because moveToTrash returns
       // false and it falls thru to fs.delete.  this doesn't seem right
-      if (moveToTrash(item)) {
+      if (moveToTrash(item) || !canBeSafelyDeleted(item)) {
         return;
       }
       if (!item.fs.delete(item.path, deleteDirs)) {
@@ -111,6 +124,28 @@ class Delete {
       out.println("Deleted " + item);
     }
 
+    private boolean canBeSafelyDeleted(PathData item)
+        throws IOException {
+      boolean shouldDelete = true;
+      if (safeDelete) {
+        final long deleteLimit = getConf().getLong(
+            HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES,
+            HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT);
+        if (deleteLimit > 0) {
+          ContentSummary cs = item.fs.getContentSummary(item.path);
+          final long numFiles = cs.getFileCount();
+          if (numFiles > deleteLimit) {
+            if (!ToolRunner.confirmPrompt("Proceed deleting " + numFiles +
+                " files?")) {
+              System.err.println("Delete aborted at user request.\n");
+              shouldDelete = false;
+            }
+          }
+        }
+      }
+      return shouldDelete;
+    }
+
     private boolean moveToTrash(PathData item) throws IOException {
       boolean success = false;
       if (!skipTrash) {
@@ -122,7 +157,7 @@ class Delete {
           String msg = ioe.getMessage();
           if (ioe.getCause() != null) {
             msg += ": " + ioe.getCause().getMessage();
-	  }
+          }
           throw new IOException(msg + ". Consider using -skipTrash option", ioe);
         }
       }
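
For context, a minimal sketch of exercising the new flag end to end. It assumes a reachable default filesystem and an interactive terminal; the limit value and path are illustrative, not part of this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class SafeRmDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same key this commit introduces; 5 is an illustrative value (default is 100).
    conf.setLong("hadoop.shell.safely.delete.limit.num.files", 5);
    // FsShell implements Tool, so ToolRunner injects the Configuration.
    // With more than 5 files under /tmp/bigdir, Delete.Rm asks
    // "Proceed deleting N files?" and aborts unless the user confirms.
    int rc = ToolRunner.run(conf, new FsShell(),
        new String[] {"-rm", "-r", "-safely", "/tmp/bigdir"});
    System.exit(rc);
  }
}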

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -1962,4 +1962,15 @@ for ldap providers in the same way as above does.
     <name>hadoop.shell.missing.defaultFs.warning</name>
     <value>false</value>
   </property>
+
+  <property>
+    <name>hadoop.shell.safely.delete.limit.num.files</name>
+    <value>100</value>
+    <description>Used by the -safely option of the hadoop fs shell -rm command
+      to avoid accidental deletion of large directories. When enabled, the -rm
+      command requires confirmation if the number of files to be deleted is
+      greater than this limit. The default limit is 100 files. The warning is
+      disabled if the limit is 0 or -safely is not specified in the -rm
+      command.
+    </description>
+  </property>
 </configuration>
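
The file count that drives this limit comes from FileSystem#getContentSummary (see the Delete.java hunk above), which walks the whole subtree and accounts for the documented delay on large directories. A condensed sketch of that check; the class and method names here are hypothetical:

import java.io.IOException;

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.ToolRunner;

class SafeDeleteCheck {
  // Returns false when the user declines the prompt; mirrors canBeSafelyDeleted().
  static boolean confirmIfLarge(FileSystem fs, Path path, long limit)
      throws IOException {
    if (limit <= 0) {
      return true; // a limit of 0 disables the check entirely
    }
    ContentSummary cs = fs.getContentSummary(path); // recursive walk; slow on big trees
    long numFiles = cs.getFileCount();              // counts files, not directories
    return numFiles <= limit
        || ToolRunner.confirmPrompt("Proceed deleting " + numFiles + " files?");
  }
}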

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java

@@ -42,7 +42,7 @@ public class TestCLI extends CLITestHelper {
 
   @Override
   protected CommandExecutor.Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor("").executeCommand(cmd.getCmd());
+    return cmd.getExecutor("", conf).executeCommand(cmd.getCmd());
 
   }
   

+ 4 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java

@@ -17,11 +17,14 @@
  */
 package org.apache.hadoop.cli.util;
 
+import org.apache.hadoop.conf.Configuration;
+
 /**
  * This interface is to generalize types of test command for upstream projects
  */
 public interface CLICommand {
-  public CommandExecutor getExecutor(String tag) throws IllegalArgumentException;
+  public CommandExecutor getExecutor(String tag, Configuration conf)
+      throws IllegalArgumentException;
   public CLICommandTypes getType();
   public String getCmd();
   @Override

+ 4 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.cli.util;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FsShell;
 
 /**
@@ -32,9 +33,10 @@ public class CLITestCmd implements CLICommand {
   }
 
   @Override
-  public CommandExecutor getExecutor(String tag) throws IllegalArgumentException {
+  public CommandExecutor getExecutor(String tag, Configuration conf)
+      throws IllegalArgumentException {
     if (getType() instanceof CLICommandFS)
-      return new FSCmdExecutor(tag, new FsShell());
+      return new FSCmdExecutor(tag, new FsShell(conf));
     throw new
         IllegalArgumentException("Unknown type of test command: " + getType());
   }
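
For downstream test code, the practical effect of this signature change is that the executor now honors per-test settings; previously new FsShell() was built without the test's Configuration, so overrides such as the lowered safely-delete limit never reached the command. A hedged sketch of driving a command through the updated interface (the command string and limit value are illustrative):

import org.apache.hadoop.cli.util.CLICommand;
import org.apache.hadoop.cli.util.CLICommandFS;
import org.apache.hadoop.cli.util.CLITestCmd;
import org.apache.hadoop.cli.util.CommandExecutor;
import org.apache.hadoop.conf.Configuration;

class Example {
  static CommandExecutor.Result runRmSafely() throws Exception {
    Configuration conf = new Configuration();
    conf.setLong("hadoop.shell.safely.delete.limit.num.files", 5);
    CLICommand cmd = new CLITestCmd("-rm -r -safely /dir0", new CLICommandFS());
    // conf now reaches FsShell, so the lowered limit actually applies.
    return cmd.getExecutor("", conf).executeCommand(cmd.getCmd());
  }
}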

+ 3 - 3
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -391,7 +391,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rm \[-f\] \[-r\|-R\] \[-skipTrash\] &lt;src&gt; \.\.\. :\s*</expected-output>
+          <expected-output>^-rm \[-f\] \[-r\|-R\] \[-skipTrash\] \[-safely\] &lt;src&gt; \.\.\. :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -403,7 +403,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*-skipTrash\s+option bypasses trash, if enabled, and immediately deletes &lt;src&gt;( )*</expected-output>
+          <expected-output>^\s*-skipTrash\s+option bypasses trash, if enabled, and immediately deletes &lt;src&gt;\.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -415,7 +415,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s+-\[rR\]\s+Recursively deletes directories\s*</expected-output>
+          <expected-output>^\s+-\[rR\]\s+Recursively deletes directories\.\s*</expected-output>
         </comparator>
       </comparators>
     </test>

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/CLITestCmdDFS.java

@@ -22,6 +22,7 @@ import org.apache.hadoop.cli.util.CLICommandTypes;
 import org.apache.hadoop.cli.util.CLITestCmd;
 import org.apache.hadoop.cli.util.CommandExecutor;
 import org.apache.hadoop.cli.util.FSCmdExecutor;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 
 public class CLITestCmdDFS extends CLITestCmd {
@@ -30,9 +31,10 @@ public class CLITestCmdDFS extends CLITestCmd {
   }
 
   @Override
-  public CommandExecutor getExecutor(String tag) throws IllegalArgumentException {
+  public CommandExecutor getExecutor(String tag, Configuration conf)
+      throws IllegalArgumentException {
     if (getType() instanceof CLICommandDFSAdmin)
-      return new FSCmdExecutor(tag, new DFSAdmin());
-    return super.getExecutor(tag);
+      return new FSCmdExecutor(tag, new DFSAdmin(conf));
+    return super.getExecutor(tag, conf);
   }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestAclCLI.java

@@ -73,7 +73,7 @@ public class TestAclCLI extends CLITestHelperDFS {
 
   @Override
   protected Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
+    return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
   }
 
   @Test

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCacheAdminCLI.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.cli.util.CLITestCmd;
 import org.apache.hadoop.cli.util.CacheAdminCmdExecutor;
 import org.apache.hadoop.cli.util.CommandExecutor;
 import org.apache.hadoop.cli.util.CommandExecutor.Result;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -119,18 +120,18 @@ public class TestCacheAdminCLI extends CLITestHelper {
     }
 
     @Override
-    public CommandExecutor getExecutor(String tag)
+    public CommandExecutor getExecutor(String tag, Configuration conf)
         throws IllegalArgumentException {
       if (getType() instanceof CLICommandCacheAdmin) {
         return new CacheAdminCmdExecutor(tag, new CacheAdmin(conf));
       }
-      return super.getExecutor(tag);
+      return super.getExecutor(tag, conf);
     }
   }
 
   @Override
   protected Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor("").executeCommand(cmd.getCmd());
+    return cmd.getExecutor("", conf).executeCommand(cmd.getCmd());
   }
 
   @Test

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestCryptoAdminCLI.java

@@ -149,18 +149,18 @@ public class TestCryptoAdminCLI extends CLITestHelperDFS {
     }
 
     @Override
-    public CommandExecutor getExecutor(String tag)
+    public CommandExecutor getExecutor(String tag, Configuration conf)
         throws IllegalArgumentException {
       if (getType() instanceof CLICommandCryptoAdmin) {
         return new CryptoAdminCmdExecutor(tag, new CryptoAdmin(conf));
       }
-      return super.getExecutor(tag);
+      return super.getExecutor(tag, conf);
     }
   }
 
   @Override
   protected Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
+    return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
   }
 
   @Test

+ 92 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestDeleteCLI.java

@@ -0,0 +1,92 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.cli;
+
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.cli.util.CLICommand;
+import org.apache.hadoop.cli.util.CommandExecutor.Result;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestDeleteCLI extends CLITestHelperDFS {
+  protected MiniDFSCluster dfsCluster = null;
+  protected FileSystem fs = null;
+  protected String namenode = null;
+
+  @Before
+  @Override
+  public void setUp() throws Exception {
+    super.setUp();
+    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+    conf.setLong(CommonConfigurationKeysPublic.
+        HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES, 5);
+
+    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    dfsCluster.waitClusterUp();
+    namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
+
+    fs = dfsCluster.getFileSystem();
+    assertTrue("Not an HDFS: " + fs.getUri(),
+        fs instanceof DistributedFileSystem);
+  }
+
+  @After
+  @Override
+  public void tearDown() throws Exception {
+    if (fs != null) {
+      fs.close();
+    }
+    if (dfsCluster != null) {
+      dfsCluster.shutdown();
+    }
+    Thread.sleep(2000);
+    super.tearDown();
+  }
+
+  @Override
+  protected String getTestFile() {
+    return "testDeleteConf.xml";
+  }
+
+  @Override
+  protected String expandCommand(final String cmd) {
+    String expCmd = cmd;
+    expCmd = expCmd.replaceAll("NAMENODE", namenode);
+    expCmd = super.expandCommand(expCmd);
+    return expCmd;
+  }
+
+  @Override
+  protected Result execute(CLICommand cmd) throws Exception {
+    return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
+  }
+
+  @Test
+  @Override
+  public void testAll() {
+    super.testAll();
+  }
+}

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java

@@ -47,7 +47,7 @@ public class TestHDFSCLI extends CLITestHelperDFS {
     
     // Many of the tests expect a replication value of 1 in the output
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
-    
+
     // Build racks and hosts configuration to test dfsAdmin -printTopology
     String [] racks =  {"/rack1", "/rack1", "/rack2", "/rack2",
                         "/rack2", "/rack3", "/rack4", "/rack4" };
@@ -95,7 +95,7 @@ public class TestHDFSCLI extends CLITestHelperDFS {
   
   @Override
   protected Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
+    return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
   }
   
   @Test

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestXAttrCLI.java

@@ -87,7 +87,7 @@ public class TestXAttrCLI  extends CLITestHelperDFS {
   
   @Override
   protected Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
+    return cmd.getExecutor(namenode, conf).executeCommand(cmd.getCmd());
   }
   
   @Test

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java

@@ -275,7 +275,8 @@ public class TestStorageRestore {
       String cmd = "-fs NAMENODE -restoreFailedStorage false";
       String namenode = config.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
       CommandExecutor executor =
-          new CLITestCmdDFS(cmd, new CLICommandDFSAdmin()).getExecutor(namenode);
+          new CLITestCmdDFS(cmd,
+              new CLICommandDFSAdmin()).getExecutor(namenode, config);
 
       executor.executeCommand(cmd);
       restore = fsi.getStorage().getRestoreFailedStorage();

+ 83 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testDeleteConf.xml

@@ -0,0 +1,83 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="testConf.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+    <!-- Normal mode is test. To run just the commands and dump the output
+         to the log, set it to nocompare -->
+    <mode>test</mode>
+
+    <!--  Comparator types:
+             ExactComparator
+             SubstringComparator
+             RegexpComparator
+             TokenComparator
+             -->
+    <tests>
+        <test>  <!-- TESTED -->
+            <description>rm -r a directory that meets the warning criteria when -safely is not used</description>
+            <test-commands>
+                <command>-fs NAMENODE -mkdir /dir0</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /dir0/data15bytes</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /dir0/data30bytes</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data60bytes /dir0/data60bytes</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data120bytes /dir0/data120bytes</command>
+                <command>-fs NAMENODE -mkdir /dir0/dir00</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /dir0/dir00/data15bytes</command>
+                <command>-fs NAMENODE -mkdir /dir0/dir01</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /dir0/dir01/data30bytes</command>
+                <command>-fs NAMENODE -ls /dir0</command>
+                <command>-fs NAMENODE -rm -r /dir0</command>
+            </test-commands>
+            <cleanup-commands>
+                <command>-fs NAMENODE -rm -r /dir0</command>
+            </cleanup-commands>
+            <comparators>
+                <comparator>
+                    <type>RegexpComparator</type>
+                    <expected-output>Deleted /dir0</expected-output>
+                </comparator>
+            </comparators>
+        </test>
+        <test>  <!-- TESTED -->
+            <description>rm -r a directory that does not meet the warning criteria when -safely is used</description>
+            <test-commands>
+                <command>-fs NAMENODE -mkdir /dir0</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /dir0/data15bytes</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /dir0/data30bytes</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data60bytes /dir0/data60bytes</command>
+                <command>-fs NAMENODE -mkdir /dir0/dir00</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /dir0/dir00/data15bytes</command>
+                <command>-fs NAMENODE -mkdir /dir0/dir01</command>
+                <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /dir0/dir01/data30bytes</command>
+                <command>-fs NAMENODE -ls /dir0</command>
+                <command>-fs NAMENODE -rm -r -safely /dir0</command>
+            </test-commands>
+            <cleanup-commands>
+                <command>-fs NAMENODE -rm -r /dir0</command>
+            </cleanup-commands>
+            <comparators>
+                <comparator>
+                    <type>RegexpComparator</type>
+                    <expected-output>Deleted /dir0</expected-output>
+                </comparator>
+            </comparators>
+        </test>
+    </tests>
+</configuration>

+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/cli/CLITestCmdMR.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.cli;
 import org.apache.hadoop.cli.util.CLICommandTypes;
 import org.apache.hadoop.cli.util.CLITestCmd;
 import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.conf.Configuration;
 
 public class CLITestCmdMR extends CLITestCmd {
   public CLITestCmdMR(String str, CLICommandTypes type) {
@@ -34,7 +35,7 @@ public class CLITestCmdMR extends CLITestCmd {
    * of the test method.
    */
   @Override
-  public CommandExecutor getExecutor(String tag)
+  public CommandExecutor getExecutor(String tag, Configuration conf)
       throws IllegalArgumentException {
     throw new IllegalArgumentException("Method isn't supported");
   }