
HADOOP-4687 Moving directories around

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/HADOOP-4687/hdfs@776176 13f79535-47bb-0310-9956-ffa450edef68
Owen O'Malley 16 years ago
parent
commit
415ccedbd2
100 changed files with 39,934 additions and 0 deletions
  1. 125 0
      src/test/org/apache/hadoop/cli/TestHDFSCLI.java
  2. 8 0
      src/test/org/apache/hadoop/cli/clitest_data/data120bytes
  3. 1 0
      src/test/org/apache/hadoop/cli/clitest_data/data15bytes
  4. 2 0
      src/test/org/apache/hadoop/cli/clitest_data/data30bytes
  5. 4 0
      src/test/org/apache/hadoop/cli/clitest_data/data60bytes
  6. 16837 0
      src/test/org/apache/hadoop/cli/testHDFSConf.xml
  7. 431 0
      src/test/org/apache/hadoop/fs/TestGlobPaths.java
  8. 155 0
      src/test/org/apache/hadoop/fs/TestUrlStreamHandler.java
  9. 155 0
      src/test/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java
  10. 252 0
      src/test/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
  11. 311 0
      src/test/org/apache/hadoop/fs/permission/TestStickyBit.java
  12. 119 0
      src/test/org/apache/hadoop/hdfs/AppendTestUtil.java
  13. 234 0
      src/test/org/apache/hadoop/hdfs/BenchmarkThroughput.java
  14. 284 0
      src/test/org/apache/hadoop/hdfs/DFSTestUtil.java
  15. 240 0
      src/test/org/apache/hadoop/hdfs/DataNodeCluster.java
  16. 848 0
      src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
  17. 70 0
      src/test/org/apache/hadoop/hdfs/TestAbandonBlock.java
  18. 64 0
      src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
  19. 225 0
      src/test/org/apache/hadoop/hdfs/TestCrcCorruption.java
  20. 100 0
      src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  21. 127 0
      src/test/org/apache/hadoop/hdfs/TestDFSFinalize.java
  22. 76 0
      src/test/org/apache/hadoop/hdfs/TestDFSMkdirs.java
  23. 992 0
      src/test/org/apache/hadoop/hdfs/TestDFSPermission.java
  24. 98 0
      src/test/org/apache/hadoop/hdfs/TestDFSRename.java
  25. 244 0
      src/test/org/apache/hadoop/hdfs/TestDFSRollback.java
  26. 1267 0
      src/test/org/apache/hadoop/hdfs/TestDFSShell.java
  27. 122 0
      src/test/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
  28. 213 0
      src/test/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
  29. 249 0
      src/test/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
  30. 252 0
      src/test/org/apache/hadoop/hdfs/TestDFSUpgrade.java
  31. 203 0
      src/test/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
  32. 350 0
      src/test/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
  33. 438 0
      src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
  34. 417 0
      src/test/org/apache/hadoop/hdfs/TestDatanodeDeath.java
  35. 87 0
      src/test/org/apache/hadoop/hdfs/TestDatanodeReport.java
  36. 297 0
      src/test/org/apache/hadoop/hdfs/TestDecommission.java
  37. 60 0
      src/test/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java
  38. 232 0
      src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
  39. 348 0
      src/test/org/apache/hadoop/hdfs/TestFSInputChecker.java
  40. 131 0
      src/test/org/apache/hadoop/hdfs/TestFSOutputSummer.java
  41. 312 0
      src/test/org/apache/hadoop/hdfs/TestFileAppend.java
  42. 427 0
      src/test/org/apache/hadoop/hdfs/TestFileAppend2.java
  43. 270 0
      src/test/org/apache/hadoop/hdfs/TestFileAppend3.java
  44. 171 0
      src/test/org/apache/hadoop/hdfs/TestFileCorruption.java
  45. 750 0
      src/test/org/apache/hadoop/hdfs/TestFileCreation.java
  46. 145 0
      src/test/org/apache/hadoop/hdfs/TestFileCreationClient.java
  47. 99 0
      src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java
  48. 80 0
      src/test/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
  49. 24 0
      src/test/org/apache/hadoop/hdfs/TestFileCreationNamenodeRestart.java
  50. 143 0
      src/test/org/apache/hadoop/hdfs/TestFileStatus.java
  51. 176 0
      src/test/org/apache/hadoop/hdfs/TestGetBlocks.java
  52. 50 0
      src/test/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
  53. 243 0
      src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java
  54. 64 0
      src/test/org/apache/hadoop/hdfs/TestHDFSTrash.java
  55. 199 0
      src/test/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
  56. 66 0
      src/test/org/apache/hadoop/hdfs/TestLease.java
  57. 140 0
      src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
  58. 151 0
      src/test/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
  59. 95 0
      src/test/org/apache/hadoop/hdfs/TestLocalDFS.java
  60. 117 0
      src/test/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
  61. 187 0
      src/test/org/apache/hadoop/hdfs/TestModTime.java
  62. 220 0
      src/test/org/apache/hadoop/hdfs/TestPread.java
  63. 620 0
      src/test/org/apache/hadoop/hdfs/TestQuota.java
  64. 310 0
      src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
  65. 453 0
      src/test/org/apache/hadoop/hdfs/TestReplication.java
  66. 81 0
      src/test/org/apache/hadoop/hdfs/TestRestartDFS.java
  67. 111 0
      src/test/org/apache/hadoop/hdfs/TestSafeMode.java
  68. 156 0
      src/test/org/apache/hadoop/hdfs/TestSeekBug.java
  69. 189 0
      src/test/org/apache/hadoop/hdfs/TestSetTimes.java
  70. 28 0
      src/test/org/apache/hadoop/hdfs/TestSetrepDecreasing.java
  71. 77 0
      src/test/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
  72. 115 0
      src/test/org/apache/hadoop/hdfs/TestSmallBlock.java
  73. 390 0
      src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java
  74. Binary
      src/test/org/apache/hadoop/hdfs/hadoop-14-dfs-dir.tgz
  75. 67 0
      src/test/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt
  76. 299 0
      src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
  77. 239 0
      src/test/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
  78. 658 0
      src/test/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
  79. 254 0
      src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
  80. 50 0
      src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
  81. 361 0
      src/test/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java
  82. 153 0
      src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
  83. 114 0
      src/test/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
  84. 294 0
      src/test/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java
  85. 210 0
      src/test/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java
  86. 93 0
      src/test/org/apache/hadoop/hdfs/server/namenode/FileNameGenerator.java
  87. 1185 0
      src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
  88. 250 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
  89. 714 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
  90. 57 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java
  91. 51 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java
  92. 152 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
  93. 171 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
  94. 376 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
  95. 88 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java
  96. 97 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestHost2NodesMap.java
  97. 40 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java
  98. 391 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
  99. 136 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
  100. 107 0
      src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java

+ 125 - 0
src/test/org/apache/hadoop/cli/TestHDFSCLI.java

@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.cli;
+
+import org.apache.hadoop.cli.util.CommandExecutor;
+import org.apache.hadoop.cli.util.CLITestData.TestCmd;
+import org.apache.hadoop.cli.util.CommandExecutor.Result;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HDFSPolicyProvider;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.util.ToolRunner;
+
+public class TestHDFSCLI extends TestCLI{
+
+  protected MiniDFSCluster dfsCluster = null;
+  protected DistributedFileSystem dfs = null;
+  protected String namenode = null;
+  protected DFSAdminCmdExecutor dfsAdmCmdExecutor = null;
+  protected FSCmdExecutor fsCmdExecutor = null;
+  
+  public void setUp() throws Exception {
+    super.setUp();
+    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
+        HDFSPolicyProvider.class, PolicyProvider.class);
+    
+    // Many of the tests expect a replication value of 1 in the output
+    conf.setInt("dfs.replication", 1);
+    
+    // Build racks and hosts configuration to test dfsAdmin -printTopology
+    String [] racks =  {"/rack1", "/rack1", "/rack2", "/rack2",
+                        "/rack2", "/rack3", "/rack4", "/rack4" };
+    String [] hosts = {"host1", "host2", "host3", "host4",
+                       "host5", "host6", "host7", "host8" };
+    dfsCluster = new MiniDFSCluster(conf, 8, true, racks, hosts);
+    
+    namenode = conf.get("fs.default.name", "file:///");
+    
+    username = System.getProperty("user.name");
+    dfsAdmCmdExecutor = new DFSAdminCmdExecutor(namenode);
+    fsCmdExecutor =  new FSCmdExecutor(namenode);
+
+    FileSystem fs = dfsCluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+               fs instanceof DistributedFileSystem);
+    dfs = (DistributedFileSystem) fs;
+  }
+
+  protected String getTestFile() {
+    return "testHDFSConf.xml";
+  }
+  
+  public void tearDown() throws Exception {
+    dfs.close();
+    dfsCluster.shutdown();
+    Thread.sleep(2000);
+    super.tearDown();
+  }
+
+  protected String expandCommand(final String cmd) {
+    String expCmd = cmd;
+    expCmd = expCmd.replaceAll("NAMENODE", namenode);
+    expCmd = super.expandCommand(expCmd);
+    return expCmd;
+  }
+  
+  protected Result execute(TestCmd cmd) throws Exception {
+    CommandExecutor executor = null;
+    switch(cmd.getType()) {
+    case DFSADMIN:
+      executor = dfsAdmCmdExecutor;
+      break;
+    case FS:
+      executor = fsCmdExecutor;
+      break;
+    default:
+      throw new Exception("Unknown type of test command: " + cmd.getType());
+    }
+    return executor.executeCommand(cmd.getCmd());
+  }
+  
+  public static class DFSAdminCmdExecutor extends CommandExecutor {
+    private String namenode = null;
+    public DFSAdminCmdExecutor(String namenode) {
+      this.namenode = namenode;
+    }
+    
+    protected void execute(final String cmd) throws Exception{
+      DFSAdmin shell = new DFSAdmin();
+      String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
+      ToolRunner.run(shell, args);
+    }
+  }
+  
+  public static class FSCmdExecutor extends CommandExecutor {
+    private String namenode = null;
+    public FSCmdExecutor(String namenode) {
+      this.namenode = namenode;
+    }
+    protected void execute(final String cmd) throws Exception{
+      FsShell shell = new FsShell();
+      String[] args = getCommandAsArgs(cmd, "NAMENODE", this.namenode);
+      ToolRunner.run(shell, args);
+    }
+  }
+}
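
As a reading aid (not part of this commit's diff): each <command> entry in the testHDFSConf.xml file added below is run by the executor classes above, which substitute the NAMENODE token with the cluster's fs.default.name URI and hand the resulting arguments to FsShell or DFSAdmin through ToolRunner. A minimal standalone sketch of that flow follows; the URI, the whitespace tokenization, and the main method are illustrative assumptions, not code from this commit.

// Illustrative sketch only. Assumes Hadoop on the classpath and an HDFS reachable at
// the given URI; the whitespace split mirrors what CommandExecutor.getCommandAsArgs
// is assumed to do with the "NAMENODE" placeholder.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class FsShellCommandSketch {
  public static void main(String[] args) throws Exception {
    String namenode = "hdfs://localhost:8020";        // assumed URI; TestHDFSCLI reads it from fs.default.name
    String cmd = "-fs NAMENODE -ls /file1";           // one <command> line from testHDFSConf.xml
    String[] shellArgs = cmd.replaceAll("NAMENODE", namenode).trim().split("\\s+");
    int exitCode = ToolRunner.run(new Configuration(), new FsShell(), shellArgs);
    System.out.println("FsShell exit code: " + exitCode);
  }
}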

+ 8 - 0
src/test/org/apache/hadoop/cli/clitest_data/data120bytes

@@ -0,0 +1,8 @@
+12345678901234
+12345678901234
+12345678901234
+12345678901234
+12345678901234
+12345678901234
+12345678901234
+12345678901234

+ 1 - 0
src/test/org/apache/hadoop/cli/clitest_data/data15bytes

@@ -0,0 +1 @@
+12345678901234

+ 2 - 0
src/test/org/apache/hadoop/cli/clitest_data/data30bytes

@@ -0,0 +1,2 @@
+12345678901234
+12345678901234

+ 4 - 0
src/test/org/apache/hadoop/cli/clitest_data/data60bytes

@@ -0,0 +1,4 @@
+12345678901234
+12345678901234
+12345678901234
+12345678901234

+ 16837 - 0
src/test/org/apache/hadoop/cli/testHDFSConf.xml

@@ -0,0 +1,16837 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="testConf.xsl"?>
+
+<configuration>
+  <!-- Normal mode is test. To run just the commands and dump the output
+       to the log, set it to nocompare -->
+  <mode>test</mode>
+  
+  <!--  Comparator types:
+           ExactComparator
+           SubstringComparator
+           RegexpComparator
+           TokenComparator
+           -->
+  <tests>
+    <!-- Tests for ls -->
+    <test> <!-- TESTED -->
+      <description>ls: file using absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -ls /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>ls: file using relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -ls file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>ls: files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -ls file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>ls: directory using absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -ls /</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>ls: directory using relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir1</command>
+        <command>-fs NAMENODE -ls </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>ls: directory using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir1</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir3</command>
+        <command>-fs NAMENODE -mkdir dir4</command>
+        <command>-fs NAMENODE -ls </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>ls: file/directory that does not exist in /</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls /file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^ls: Cannot access /file1: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>ls: file/directory that does not exist in home directory (/user/username)</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls /user</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^ls: Cannot access /user: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>ls: Non-URI input file at Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -ls NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>ls: file at hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -ls hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>ls: Non-URI input file at Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -ls NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>ls: file at hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -ls hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>ls: Non-URI input dir at Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/user/dir1</command>
+        <command>-fs NAMENODE -ls hdfs:///user/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///user/dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Found [0-9] items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>ls: dir at hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///user/dir1</command>
+        <command>-fs NAMENODE -ls hdfs:///user/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///user/dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Found [0-9] items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>ls: Non-URI input dir at Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/user/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/user/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/user/dir3</command>
+        <command>-fs NAMENODE -ls hdfs:///user/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///user/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Found [0-9] items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/dir3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>ls: dir at hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///user/dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///user/dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///user/dir3</command>
+        <command>-fs NAMENODE -ls hdfs:///user/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///user/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Found [0-9] items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/dir3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>ls:  non-existent file/directory at hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -ls hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- no cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^ls: Cannot access hdfs:///file1: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>ls:  non-existent file/directory in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -ls NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- no cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^ls: Cannot access hdfs://localhost[.a-z]*:[0-9]+/file1: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for lsr -->
+    <test> <!-- TESTED -->
+      <description>lsr: files/directories using absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1/dir1</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1/dir2</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir2</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir2/dir1</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir2/dir2</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -touchz /dir0/dir2/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir2/file2</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/dir1/file2</command>
+        <command>-fs NAMENODE -touchz /dir0/dir2/dir2/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir2/dir2/file2</command>
+        <command>-fs NAMENODE -lsr /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir2/dir2/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>lsr: files/directories using relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1/dir1</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1/dir2</command>
+        <command>-fs NAMENODE -mkdir dir0/dir2</command>
+        <command>-fs NAMENODE -mkdir dir0/dir2/dir1</command>
+        <command>-fs NAMENODE -mkdir dir0/dir2/dir2</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -touchz dir0/dir2/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir2/file2</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/dir1/file2</command>
+        <command>-fs NAMENODE -touchz dir0/dir2/dir2/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir2/dir2/file2</command>
+        <command>-fs NAMENODE -lsr dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>lsr: files/directories using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1/dir1</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1/dir2</command>
+        <command>-fs NAMENODE -mkdir dir0/dir2</command>
+        <command>-fs NAMENODE -mkdir dir0/dir2/dir1</command>
+        <command>-fs NAMENODE -mkdir dir0/dir2/dir2</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -touchz dir0/dir2/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir2/file2</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/dir1/file2</command>
+        <command>-fs NAMENODE -touchz dir0/dir2/dir2/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir2/dir2/file2</command>
+        <command>-fs NAMENODE -lsr dir0/*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <!-- JIRA?
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^/user/[a-z]*/dir0/dir2</expected-output>
+        </comparator>
+       -->
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir2/dir2/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>lsr: file/directory that does not exist in /</description>
+      <test-commands>
+        <command>-fs NAMENODE -lsr /file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^lsr: Cannot access /file1: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>lsr: file/directory that does not exist in home directory (/user/username)</description>
+      <test-commands>
+        <command>-fs NAMENODE -lsr /user</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^lsr: Cannot access /user: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>lsr: dir at hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -lsr hdfs:///</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///dir0</command>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>lsr: files/directories in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir0/*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+     
+    <test> <!-- TESTED -->
+      <description>lsr: Non-existent file/directory in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -lsr hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^lsr: Cannot access hdfs:///file1: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>lsr: dir at Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///dir0</command>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>lsr: Non-URI input files/directories in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir0/*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>lsr: Non-URI input for non-existent file/directory in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -lsr NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^lsr: Cannot access hdfs://localhost[.a-z]*:[0-9]+/file1: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>lsr: Test for /*/* globbing </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -lsr /\*/\*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for du -->
+    <test> <!-- TESTED -->
+      <description>du: file using absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes</command>
+        <command>-fs NAMENODE -du /data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: file using relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -du data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes data120bytes</command>
+        <command>-fs NAMENODE -du data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 4 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: directory using absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/data15bytes</command>
+        <command>-fs NAMENODE -du /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: directory using relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes dir0/data15bytes</command>
+        <command>-fs NAMENODE -du dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: directory using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/data120bytes</command>
+        <command>-fs NAMENODE -du /dir0/*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 4 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <test> <!-- TESTED -->
+      <description>du: Test for hdfs:// path - file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///data15bytes</command>
+        <command>-fs NAMENODE -du hdfs:///data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: Test for hdfs:// path - files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes hdfs:///data120bytes</command>
+        <command>-fs NAMENODE -du hdfs:///data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 4 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: Test for hdfs:// path - directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///dir0/data15bytes</command>
+        <command>-fs NAMENODE -du hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: Test for hdfs:// path - directory using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes hdfs:///dir0/data120bytes</command>
+        <command>-fs NAMENODE -du hdfs:///dir0/*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 4 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: Test for Namenode's path - file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/data15bytes</command>
+        <command>-fs NAMENODE -du NAMENODE/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: Test for Namenode's path - files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes NAMENODE/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes NAMENODE/data120bytes</command>
+        <command>-fs NAMENODE -du NAMENODE/data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 4 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: Test for Namenode's path - directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/data15bytes</command>
+        <command>-fs NAMENODE -du NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>du: Test for Namenode's path - directory using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes NAMENODE/dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes NAMENODE/dir0/data120bytes</command>
+        <command>-fs NAMENODE -du NAMENODE/dir0/*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 4 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <!-- Tests for dus -->
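+    <!-- Each dus test stages two copies of the 15-, 30-, 60- and 120-byte files (plus an empty file0) under dir0, so the expected summary is 2*(15+30+60+120) = 450 bytes -->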
+    <test> <!-- TESTED -->
+      <description>dus: directories/files using absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1/dir1</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1/dir2</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir2</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir2/dir1</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir2/dir2</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/dir1/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/dir1/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/dir2/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/dir2/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/dir1/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/dir1/dir2/data120bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/dir2/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/dir2/dir2/data120bytes</command>
+        <command>-fs NAMENODE -dus /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>dus: directories/files using relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1/dir1</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1/dir2</command>
+        <command>-fs NAMENODE -mkdir dir0/dir2</command>
+        <command>-fs NAMENODE -mkdir dir0/dir2/dir1</command>
+        <command>-fs NAMENODE -mkdir dir0/dir2/dir2</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes dir0/dir1/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes dir0/dir1/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes dir0/dir2/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes dir0/dir2/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes dir0/dir1/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes dir0/dir1/dir2/data120bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes dir0/dir2/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes dir0/dir2/dir2/data120bytes</command>
+        <command>-fs NAMENODE -dus dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0( |\t)*450</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>dus: directories/files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1/dir1</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1/dir2</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir2</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir2/dir1</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir2/dir2</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/dir1/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/dir1/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/dir2/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/dir2/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/dir1/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/dir1/dir2/data120bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/dir2/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/dir2/dir2/data120bytes</command>
+        <command>-fs NAMENODE -mkdir /donotcountdir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -dus /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+        <command>-fs NAMENODE -rmr /donotcountdir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>dus: Test for hdfs:// path - directories/files</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1/dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1/dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir2/dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir2/dir2</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///dir0/dir1/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///dir0/dir1/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///dir0/dir2/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///dir0/dir2/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///dir0/dir1/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes hdfs:///dir0/dir1/dir2/data120bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///dir0/dir2/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes hdfs:///dir0/dir2/dir2/data120bytes</command>
+        <command>-fs NAMENODE -dus hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>dus: Test for hdfs:// path - directories/files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1/dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1/dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir2/dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir2/dir2</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///dir0/dir1/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///dir0/dir1/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///dir0/dir2/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///dir0/dir2/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///dir0/dir1/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes hdfs:///dir0/dir1/dir2/data120bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///dir0/dir2/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes hdfs:///dir0/dir2/dir2/data120bytes</command>
+        <command>-fs NAMENODE -mkdir /donotcountdir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -dus hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+        <command>-fs NAMENODE -rmr hdfs:///donotcountdir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>dus: Test for Namenode's path - directories/files</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir2/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir2/dir2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/dir1/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/dir0/dir1/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/dir2/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/dir0/dir2/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes NAMENODE/dir0/dir1/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes NAMENODE/dir0/dir1/dir2/data120bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes NAMENODE/dir0/dir2/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes NAMENODE/dir0/dir2/dir2/data120bytes</command>
+        <command>-fs NAMENODE -dus NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>dus: Test for Namenode's path - directories/files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir2/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir2/dir2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/dir1/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/dir0/dir1/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/dir2/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/dir0/dir2/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes NAMENODE/dir0/dir1/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes NAMENODE/dir0/dir1/dir2/data120bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes NAMENODE/dir0/dir2/dir1/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes NAMENODE/dir0/dir2/dir2/data120bytes</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/donotcountdir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/donotcountdir0/data15bytes</command>
+        <command>-fs NAMENODE -dus NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+        <command>-fs NAMENODE -rmr NAMENODE/donotcountdir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0( |\t)*450</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <!-- Tests for mv -->
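+    <!-- Tests marked "[ TIED to previous test ]" check state left by the preceding test (the moved source must be gone) and therefore have empty cleanup-commands -->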
+    <test> <!-- TESTED -->
+      <description>mv: file (absolute path) to file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -mv /file1 /file2</command>
+        <command>-fs NAMENODE -ls /file*</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file2</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file[^1]</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: file (absolute path) to file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -mv /file1 file2</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^mv: Failed to rename hdfs://localhost[.a-z]*:[0-9]*/file1 to file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: file (absolute path) to directory (absolute path); keep the same name at the destination</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mv /file1 /dir0</command>
+        <command>-fs NAMENODE -lsr /dir0</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: file (absolute path) to directory (absolute path); keep the same name at the destination [ TIED to previous test ]</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls /file1</command>        
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>ls: Cannot access /file1: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: file (absolute path) to directory (absolute path); change the name at the destination</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mv /file1 /dir0/file2</command>
+        <command>-fs NAMENODE -ls /dir0</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: file (absolute path) to directory (absolute path); change the name at the destination [ TIED to previous test ]</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls /file1</command>        
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>ls: Cannot access /file1: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: files (absolute path) to directory (absolute path) using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mv /file* /dir0</command>
+        <command>-fs NAMENODE -lsr /*</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: files (absolute path) to directory (absolute path) using globbing [ TIED to previous test ]</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls /file*</command>        
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>ls: Cannot access /file*: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: file (relative) to file (relative)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -mv file1 file2</command>
+        <command>-fs NAMENODE -ls file*</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file[^1]</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: moving file to file (rename) in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -mv hdfs:///file1 hdfs:///file2</command>
+        <command>-fs NAMENODE -ls hdfs:///file*</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file2</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file[^1]</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: moving file to directory (keep the same name at the destination) in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mv hdfs:///file1 hdfs:///dir0</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir0</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: moving files to directory in hdfs:// path [ TIED to previous test ]</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls hdfs:///file*</command>        
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>ls: Cannot access hdfs:///file*: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>mv: moving file that does not exist in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mv hdfs:///file1 hdfs:///file2</command>        
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>mv: hdfs:/file1: No such file or directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: moving file to directory (different name at the destination) in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mv hdfs:///file1 hdfs:///dir0/file2</command>
+        <command>-fs NAMENODE -ls hdfs:///dir0</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: moving file to directory (different name at the destination) in hdfs:// path [ TIED to previous test ]</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls hdfs:///file1</command>        
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>ls: Cannot access hdfs:///file1: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: moving group of files to directory using globbing in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mv hdfs:///file* hdfs:///dir0</command>
+        <command>-fs NAMENODE -lsr hdfs:///*</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mv: moving files to directory using globbing in hdfs:// [ TIED to previous test ]</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls hdfs:///file*</command>        
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>ls: Cannot access hdfs:///file*: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>mv: moving file to file (rename) in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -mv NAMENODE/file1 NAMENODE/file2</command>
+        <command>-fs NAMENODE -ls NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file2</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file[^1]</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>mv: moving file to directory (keep the same name at the destination) in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mv NAMENODE/file1 NAMENODE/dir0</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>mv: moving files to directory in Namenode's path [ TIED to previous test ]</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^ls: Cannot access hdfs:\/\/localhost[.a-z]*:[0-9]+\/file1: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>mv: moving file that does not exist in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mv NAMENODE/file1 NAMENODE/file2</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^mv: hdfs://localhost[.a-z]*:[0-9]+/file1: No such file or directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>mv: moving file to directory (different name at the destination) in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mv NAMENODE/file1 NAMENODE/dir0/file2</command>
+        <command>-fs NAMENODE -ls NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>mv: moving file to directory (different name at the destination) in Namenode's path [ TIED to previous test ]</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^ls: Cannot access hdfs:\/\/localhost[.a-z]*:[0-9]+\/file1: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>mv: moving group of files to directory using globbing in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mv NAMENODE/file* NAMENODE/dir0</command>
+        <command>-fs NAMENODE -lsr NAMENODE/*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>mv: moving files to directory using globbing in Namenode's path [ TIED to previous test ]</description>
+      <test-commands>
+        <command>-fs NAMENODE -ls NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^ls: Cannot access hdfs:\/\/localhost[.a-z]*:[0-9]+\/file\*: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>mv: moving directory to directory in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file1</command>
+        <command>-fs NAMENODE -mv hdfs:///dir0 hdfs:///dir1</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir1</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <test> <!-- TESTED -->
+      <description>mv: moving directory to directory in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file1</command>
+        <command>-fs NAMENODE -mv NAMENODE/dir0 NAMENODE/dir1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <!-- Tests for cp -->
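+    <!-- Unlike mv, cp leaves the source in place, so these tests list both source and destination and expect a comparator match for each -->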
+    <test> <!-- TESTED -->
+      <description>cp: file (absolute path) to file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -cp /file1 /file2</command>
+        <command>-fs NAMENODE -ls /file*</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: file (absolute path) to file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -cp /file1 file2</command>
+        <command>-fs NAMENODE -ls /file1 file2</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /file1 file2</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: file (relative path) to file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -cp file1 /file2</command>
+        <command>-fs NAMENODE -ls file1 /file2</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr file1 /file2</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: file (relative path) to file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -cp file1 file2</command>
+        <command>-fs NAMENODE -ls file1 file2</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr file1 file2</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: file (absolute path) to directory (absolute path); keep the same name at the destination</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -cp /file1 /dir0</command>
+        <command>-fs NAMENODE -ls /file1 /dir0</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: file (absolute path) to directory (absolute path); change the name at the destination</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -cp /file1 /dir0/file2</command>
+        <command>-fs NAMENODE -ls /file1 /dir0</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: files to directory (absolute path) using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -cp /file* /dir0</command>
+        <command>-fs NAMENODE -lsr /*</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: files to directory (absolute path) without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -cp /file1 /file2 /file3 /file4 /dir0</command>
+        <command>-fs NAMENODE -lsr /*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: copying a nonexistent file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -cp /file /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: File does not exist: /file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: copying a nonexistent file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -cp file1 file2</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: File does not exist: file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: files to an existent file using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -touchz /file5</command>
+        <command>-fs NAMENODE -cp /file* /file5</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: files to an existent file without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -touchz /file5</command>
+        <command>-fs NAMENODE -cp /file1 /file2 /file3 /file4 /file5</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination /file5 should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: files to a nonexistent directory using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -cp /file* dir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: files to a nonexistent directory without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -cp /file1 /file2 /file3 /file4 dir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination dir should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <test> <!-- TESTED -->
+      <description>cp: file to file copy in hdfs:// path </description>
+      <test-commands>
+       <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -cp hdfs:///file1 hdfs:///file2</command>
+        <command>-fs NAMENODE -ls hdfs:///file*</command>        
+      </test-commands>
+      <cleanup-commands>
+       <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+       <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+         <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: file to directory copy (same name at the destination) in hdfs:// path</description>
+     <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -cp hdfs:///file1 hdfs:///dir0</command>
+        <command>-fs NAMENODE -ls hdfs:///file1 hdfs:///dir0</command>        
+      </test-commands>
+     <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0/ hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: file to directory (different name at the destination) in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -cp hdfs:///file1 hdfs:///dir0/file2</command>
+        <command>-fs NAMENODE -ls hdfs:///file1 hdfs:///dir0</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0/</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: files to directory using globbing in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -cp hdfs:///file* hdfs:///dir0</command>
+        <command>-fs NAMENODE -lsr hdfs:///*</command>        
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///file*</command>
+        <command>-fs NAMENODE -rmr hdfs:///dir0/</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: files to directory in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -cp hdfs:///file1 hdfs:///file2 hdfs:///file3 hdfs:///dir0</command>
+        <command>-fs NAMENODE -lsr hdfs:///*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///file*</command>
+        <command>-fs NAMENODE -rmr hdfs:///dir0/</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: copying a nonexistent file in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -cp hdfs:///file hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: File does not exist: hdfs:/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>cp: copying files to an existent file in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -touchz hdfs:///file5</command>
+        <command>-fs NAMENODE -cp hdfs:///file* hdfs:///file5</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: copying files to an existent file in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -touchz hdfs:///file5</command>
+        <command>-fs NAMENODE -cp hdfs:///file1 hdfs:///file2 hdfs:///file3 hdfs:///file4 hdfs:///file5</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination hdfs:///file5 should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: copying files to a nonexistent directory in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -cp hdfs:///file* hdfs:///dir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cp: copying files to a nonexistent directory in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -cp hdfs:///file1 hdfs:///file2 hdfs:///file3 hdfs:///file4 hdfs:///dir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination hdfs:///dir should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying a nonexistent directory in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -cp hdfs:///dir0 hdfs:///dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: File does not exist: hdfs:/dir0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying directory to directory in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file1</command>
+        <command>-fs NAMENODE -cp hdfs:///dir0 hdfs:///dir1</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying multiple directories to directory using globbing in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dest</command>
+        <command>-fs NAMENODE -cp hdfs:///dir* hdfs:///dest</command>
+        <command>-fs NAMENODE -lsr hdfs:///d*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///d*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying multiple directories to directory in hdfs:// path without using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dest</command>
+        <command>-fs NAMENODE -cp hdfs:///dir0 hdfs:///dir1 hdfs:///dir2 hdfs:///dest</command>
+        <command>-fs NAMENODE -lsr hdfs:///d*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: file to file copy in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -cp NAMENODE/file1 NAMENODE/file2</command>
+        <command>-fs NAMENODE -ls NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: file to directory copy (same name at the destination) in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -cp NAMENODE/file1 NAMENODE/dir0</command>
+        <command>-fs NAMENODE -ls NAMENODE/file1 NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0/ NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: file to directory (different name at the destination) in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -cp NAMENODE/file1 NAMENODE/dir0/file2</command>
+        <command>-fs NAMENODE -ls NAMENODE/file1 NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0/</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: files to directory in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -cp NAMENODE/file* NAMENODE/dir0</command>
+        <command>-fs NAMENODE -lsr NAMENODE/*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/file*</command>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0/</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: files to directory in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -cp NAMENODE/file1 NAMENODE/file2 NAMENODE/file3 NAMENODE/dir0</command>
+        <command>-fs NAMENODE -lsr NAMENODE/*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/file*</command>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0/</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying a nonexistent file in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -cp NAMENODE/file NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: File does not exist: hdfs://localhost[.a-z]*:[0-9]+/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying files to an existent file in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file5</command>
+        <command>-fs NAMENODE -cp NAMENODE/file* NAMENODE/file5</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying files to an existent file in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file5</command>
+        <command>-fs NAMENODE -cp NAMENODE/file1 NAMENODE/file2 NAMENODE/file3 NAMENODE/file4 NAMENODE/file5</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination hdfs://localhost[.a-z]*:[0-9]+/file5 should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying files to a nonexistent directory in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -cp NAMENODE/file* NAMENODE/dir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying files to a nonexistent directory in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -cp NAMENODE/file1 NAMENODE/file2 NAMENODE/file3 NAMENODE/file4 NAMENODE/dir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cp: When copying multiple files, destination hdfs://localhost[.a-z]*:[0-9]+/dir should be a directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying directory to directory in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file1</command>
+        <command>-fs NAMENODE -cp NAMENODE/dir0 NAMENODE/dir1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir1/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying multiple directories to directory in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dest</command>
+        <command>-fs NAMENODE -cp NAMENODE/dir* NAMENODE/dest</command>
+        <command>-fs NAMENODE -lsr NAMENODE/d*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/d*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>cp: copying multiple directories to directory in Namenode's path without using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dest</command>
+        <command>-fs NAMENODE -cp NAMENODE/dir0 NAMENODE/dir1 NAMENODE/dir2 NAMENODE/dest</command>
+        <command>-fs NAMENODE -lsr NAMENODE/d*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dest/dir2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <!-- Tests for rm -->
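+    <!-- Illustrative note (not part of the original test data): -rm only removes files;
+         removing an existing directory is expected to fail with 'Cannot remove
+         directory "...", use -rmr instead', and removing a missing path reports
+         "No such file or directory", as asserted below. Relative paths resolve under
+         the current user's home directory, /user/<username>, which is why the
+         relative-path tests expect Deleted .../user/[a-z]*/fileN. -->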
+    <test> <!-- TESTED -->
+      <description>rm: removing a file (absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -rm /dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing a file (relative path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file0</command>
+        <command>-fs NAMENODE -rm file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing files by globbing (absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/file2</command>
+        <command>-fs NAMENODE -touchz /dir0/file3</command>
+        <command>-fs NAMENODE -rm /dir0/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing files by globbing (relative path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file0</command>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -rm file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing a directory (absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -rm /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rm: Cannot remove directory "hdfs://localhost[.a-z]*:[0-9]*/dir0", use -rmr instead</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing a directory (relative path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -rm dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rm: Cannot remove directory "hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0", use -rmr instead</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing a nonexistent file (absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -rm /dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rm: cannot remove /dir0/file0: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing a nonexistent file (relative path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -rm file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rm: cannot remove file0: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>rm: removing a file in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file0</command>
+        <command>-fs NAMENODE -rm hdfs:///file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+     
+    <test> <!-- TESTED -->
+      <description>rm: removing files by globbing in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing a directory in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -rm hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rm: Cannot remove directory "hdfs://localhost[.a-z]*:[0-9]*/dir0", use -rmr instead</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing a nonexistent file or directory in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -rm hdfs:///file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rm: cannot remove hdfs:/file0: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing files without globbing in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -rm hdfs:///file0 hdfs:///file1 hdfs:///file2 hdfs:///file3</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file0</expected-output>
+       </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+       </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+       </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+       </comparator>
+      </comparators>
+    </test>
+    
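+    <!-- Tests for rm using the NAMENODE URI -->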
+    <test> <!-- TESTED -->
+      <description>rm: removing a file in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file0</command>
+        <command>-fs NAMENODE -rm NAMENODE/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <test> <!-- TESTED -->
+      <description>rm: removing files by globbing in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing a directory in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -rm NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rm: Cannot remove directory "hdfs://localhost[.a-z]*:[0-9]*/dir0", use -rmr instead</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing a nonexistent file or directory in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rm: cannot remove hdfs://localhost[.a-z]*:[0-9]*/file0: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rm: removing files without globbing in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -rm NAMENODE/file0 NAMENODE/file1 NAMENODE/file2 NAMENODE/file3</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+       </comparator>
+      </comparators>
+    </test>
+    
+    <!--Tests for rmr-->
+    <test> <!-- TESTED -->
+      <description>rmr: removing a file (absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -rmr /dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rmr: removing a file (relative path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file0</command>
+        <command>-fs NAMENODE -rmr file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rmr: removing a directory (absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rmr: removing a directory (relative path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -rmr dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rmr: removing directories by globbing (absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir3</command>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rmr: removing directories by globbing (relative path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir1</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir3</command>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!--TESTED-->
+      <description>rmr: removing a nonexistent file (absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -rmr /dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rmr: cannot remove /dir0/file0: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rmr: removing a nonexistent file (relative path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -rmr file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rmr: cannot remove file0: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
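+    <!-- Tests for rmr using hdfs:// URIs -->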
+    <test> <!-- TESTED -->
+      <description>rmr: removing a file in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file0</command>
+        <command>-fs NAMENODE -rmr hdfs:///file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rmr: removing a directory in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>rmr: removing directories by globbing in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir3</command>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!--TESTED-->
+      <description>rmr: removing a nonexistent file or directory in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -rmr hdfs:///file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rmr: cannot remove hdfs:/file0: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+   <test> <!-- TESTED -->
+      <description>rmr: removing directories without globbing in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir3</command>
+        <command>-fs NAMENODE -rmr hdfs:///dir0 hdfs:///dir1 hdfs:///dir2 hdfs:///dir3</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
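+    <!-- Tests for rmr using the NAMENODE URI -->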
+    <test> <!-- TESTED -->
+      <description>rmr: Test for Namenode's path - removing a file</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file0</command>
+        <command>-fs NAMENODE -rmr NAMENODE/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>rmr: Test for Namenode's path - removing a directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>rmr: removing directories by globbing in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir3</command>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--TESTED-->
+      <description>rmr: removing a nonexistent file or directory in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^rmr: cannot remove hdfs://localhost[.a-z]*:[0-9]*/file0: No such file or directory.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>rmr: removing directories without globbing in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir3</command>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0 NAMENODE/dir1 NAMENODE/dir2 NAMENODE/dir3</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Deleted hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <!-- Tests for expunge -->
+    <!-- Not yet implemented -->
+
+    <!-- Tests for put -->
+    <test> <!-- TESTED -->
+      <description>put: putting file into a file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes</command>
+        <command>-fs NAMENODE -du /data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting file into a file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -du data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting file into a directory(absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA /dir0/dir1/data</command>
+        <command>-fs NAMENODE -du /dir0/dir1/data</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/dir1/data/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/dir1/data/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/dir1/data/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/dir1/data/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting file into a directory(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA dir0/dir1/data</command>
+        <command>-fs NAMENODE -du dir0/dir1/data</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/dir1/data/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/dir1/data/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/dir1/data/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/dir1/data/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting many files into an existing directory(absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes /dir0</command>
+        <command>-fs NAMENODE -du /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 2 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting many files into an existing directory(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes dir0</command>
+        <command>-fs NAMENODE -du dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 2 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/data30bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting non existent file(absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put /user/wrongdata file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>put: File /user/wrongdata does not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting non existent file(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put wrongdata file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>put: File wrongdata does not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting file into an already existing destination(absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /user/file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /user/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>put: Target /user/file0 already exists</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting file into an already existing destination(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>put: Target file0 already exists</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting many files into an existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes /data30bytes</command>
+        <command>-fs NAMENODE -touchz file0</command>
+        <command>-fs NAMENODE -put /data15bytes /data30bytes file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^put: copying multiple files, but last argument `file0' is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting many files into a non existent directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes /data30bytes</command>
+        <command>-fs NAMENODE -put /data15bytes /data30bytes wrongdir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^put: `wrongdir': specified destination directory doest not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
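+    <!-- Tests for put using hdfs:// URIs -->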
+    <test> <!-- TESTED -->
+      <description>put: putting a local file into hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///data15bytes</command>
+        <command>-fs NAMENODE -du hdfs:///data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting file into a directory in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA hdfs:///dir1/data</command>
+        <command>-fs NAMENODE -du hdfs:///dir1/data</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir1/data</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir1/data/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir1/data/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir1/data/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir1/data/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting many local files into an existing directory in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///dir0</command>
+        <command>-fs NAMENODE -du hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 2 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting non existent file in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -put /user/wrongdata hdfs:///file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>put: File /user/wrongdata does not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting local file into an already existing destination in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///user/file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///user/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>put: Target hdfs:/user/file0 already exists</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting many local files into an existing file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^put: copying multiple files, but last argument `hdfs:/file0' is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>put: putting file into a non existent directory in hdfs:// path </description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///wrongdir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^put: `hdfs:/wrongdir': specified destination directory doest not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
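+    <!-- Tests for put using the NAMENODE URI -->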
+    <test> <!-- TESTED -->
+      <description>put: putting a local file into Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/data15bytes</command>
+        <command>-fs NAMENODE -du NAMENODE/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>put: putting file into a directory in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA NAMENODE/dir1/data</command>
+        <command>-fs NAMENODE -du NAMENODE/dir1/data</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir1/data</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir1/data/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir1/data/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir1/data/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir1/data/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>put: putting many local files into an existing directory in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/dir0</command>
+        <command>-fs NAMENODE -du NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 2 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>put: putting non existent file in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -put /user/wrongdata NAMENODE/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^put: File /user/wrongdata does not exist.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>put: putting local file into an already existing destination in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/user/file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/user/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^put: Target hdfs://localhost[.a-z]*:[0-9]+/user/file0 already exists</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>put: putting many local files into an existing file in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^put: copying multiple files, but last argument `hdfs://localhost[.a-z]*:[0-9]+/file0' is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>put: putting many files into a non existent directory in Namenode's path </description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/wrongdir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^put: `hdfs://localhost[.a-z]*:[0-9]+/wrongdir': specified destination directory doest not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for copyFromLocal -->
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying file into a file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /data15bytes</command>
+        <command>-fs NAMENODE -du /data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying file into a file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -du data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying file into a directory(absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA /dir0/dir1/data</command>
+        <command>-fs NAMENODE -du /dir0/dir1/data</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/dir1/data/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/dir1/data/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/dir1/data/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/dir1/data/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying file into a directory(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA dir0/dir1/data</command>
+        <command>-fs NAMENODE -du dir0/dir1/data</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/dir1/data/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/dir1/data/data30bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^60( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/dir1/data/data60bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^120( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/dir1/data/data120bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying many files into an existing directory(absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes /dir0</command>
+        <command>-fs NAMENODE -du /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 2 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying many files into an existing directory(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes dir0</command>
+        <command>-fs NAMENODE -du dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 2 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/data30bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying non existent file(absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal /user/wrongdata file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>copyFromLocal: File /user/wrongdata does not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying non existent file(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal wrongdata file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>copyFromLocal: File wrongdata does not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying file into an already existing destination(absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /user/file0</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /user/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>copyFromLocal: Target /user/file0 already exists</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying file into an already existing destination(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file0</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>copyFromLocal: Target file0 already exists</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying many files into an existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /data15bytes</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /data30bytes</command>
+        <command>-fs NAMENODE -touchz file0</command>
+        <command>-fs NAMENODE -copyFromLocal /data15bytes /data30bytes file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^copyFromLocal: copying multiple files, but last argument `file0' is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: copying many files into a non existent directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes /data15bytes</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data30bytes /data30bytes</command>
+        <command>-fs NAMENODE -copyFromLocal /data15bytes /data30bytes wrongdir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^copyFromLocal: `wrongdir': specified destination directory doest not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
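+    <!-- Tests for copyFromLocal using hdfs:// URIs -->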
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for hdfs:// path - copying local file into a hdfs://file</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes hdfs:///data15bytes</command>
+        <command>-fs NAMENODE -du hdfs:///data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+     
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for hdfs:// path - copying local file into a hdfs directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1/data</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes hdfs:///dir0/dir1/data/</command>
+        <command>-fs NAMENODE -du hdfs:///dir0/dir1/data/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/dir1/data/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+     
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for hdfs:// path copying many local files into an existing hdfs directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///dir0</command>
+        <command>-fs NAMENODE -du hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 2 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes</expected-output>
+        </comparator>
+     </comparators>
+    </test>
+   
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for hdfs:// path - copying non existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal /user/wrongdata hdfs:///file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>copyFromLocal: File /user/wrongdata does not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for hdfs:// path - copying local file into an already existing destination</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///user/file0</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes hdfs:///user/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>copyFromLocal: Target hdfs:/user/file0 already exists</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for hdfs:// path - copying many local files into an existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file0</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^copyFromLocal: copying multiple files, but last argument `hdfs:/file0' is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for hdfs:// path - copying many local files into a non existent directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///wrongdir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^copyFromLocal: `hdfs:/wrongdir': specified destination directory doest not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <!-- Tests for copyFromLocal using Namenode's path-->
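+    <!-- The NAMENODE and CLITEST_DATA tokens used below are substituted by the CLI test driver at run time
+         (NAMENODE with the cluster's hdfs://host:port URI, CLITEST_DATA with the local test data directory). -->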
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for Namenode's path - copying local file into an hdfs:// file</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes NAMENODE/data15bytes</command>
+        <command>-fs NAMENODE -du NAMENODE/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for Namenode's path - copying local file into an hdfs directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1/data</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes NAMENODE/dir0/dir1/data/</command>
+        <command>-fs NAMENODE -du NAMENODE/dir0/dir1/data/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/dir1/data/data15bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for Namenode's path - copying many local files into an existing hdfs directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/dir0</command>
+        <command>-fs NAMENODE -du NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 2 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^15( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data15bytes</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^30( |\t)*hdfs://localhost[.a-z]*:[0-9]*/dir0/data30bytes</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for Namenode's path - copying non existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal /user/wrongdata NAMENODE/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>copyFromLocal: File /user/wrongdata does not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for Namenode's path - copying local file into an already existing destination</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/user/file0</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes NAMENODE/user/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>copyFromLocal: Target hdfs://localhost[.a-z]*:[0-9]+/user/file0 already exists</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for Namenode's path - copying many local files into an existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file0</command>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^copyFromLocal: copying multiple files, but last argument `hdfs://localhost[.a-z]*:[0-9]+/file0' is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyFromLocal: Test for Namenode's path - copying many local files into a non existent directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/wrongdir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^copyFromLocal: `hdfs://localhost[.a-z]*:[0-9]+/wrongdir': specified destination directory doest not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <!-- Tests for get -->
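+    <!-- get copies a file from the file system to a local destination path; a missing source is reported as an error. -->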
+    <test> <!-- TESTED -->
+      <description>get: getting non existent file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -get /user/file CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>get: null</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>get: getting non existent file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -get file CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>get: null</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>get: Test for hdfs:// path - getting non existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -get hdfs:///user/file CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>get: null</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>get: Test for Namenode's path - getting non existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -get NAMENODE/user/file CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>get: null</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <!-- Tests for getmerge -->
+    <!-- Manual Testing -->
+
+    <!-- Tests for cat -->
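+    <!-- cat prints file contents to standard output; directories and missing paths are rejected. -->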
+    <test> <!-- TESTED -->
+      <description>cat: contents of file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes</command>
+        <command>-fs NAMENODE -cat /data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test>
+      <description>cat: contents of file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cat data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr  /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test>  <!-- TESTED -->
+      <description>cat: contents of files (absolute path) using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/data120bytes</command>
+        <command>-fs NAMENODE -cat /dir0/data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED-->
+      <description>cat: contents of files (relative path) using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes dir0/data120bytes</command>
+        <command>-fs NAMENODE -cat dir0/data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test>  <!-- TESTED -->
+      <description>cat: contents of files (absolute path) without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/data120bytes</command>
+        <command>-fs NAMENODE -cat /dir0/data15bytes /dir0/data30bytes /dir0/data60bytes /dir0/data120bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED-->
+      <description>cat: contents of files (relative path) without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes dir0/data120bytes</command>
+        <command>-fs NAMENODE -cat dir0/data15bytes dir0/data30bytes dir0/data60bytes dir0/data120bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    
+    <test> <!-- TESTED -->
+      <description>cat: contents of file (absolute path) that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -cat /file</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cat: File does not exist: /file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cat: contents of file (relative path) that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -cat file</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cat: File does not exist: file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cat: contents of directory (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -cat /dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cat: Source must be a file.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cat: contents of directory (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir1</command>
+        <command>-fs NAMENODE -cat dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cat: Source must be a file.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <test> <!-- TESTED -->
+      <description>cat: Test for hdfs:// path - content of file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///data15bytes</command>
+        <command>-fs NAMENODE -cat hdfs:///data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test>  <!-- TESTED -->
+      <description>cat: Test for hdfs:// path - contents of files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes hdfs:///dir0/data120bytes</command>
+        <command>-fs NAMENODE -cat hdfs:///dir0/data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test>  <!-- TESTED -->
+      <description>cat: Test for hdfs:// path - contents of files without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes hdfs:///dir0/data120bytes</command>
+        <command>-fs NAMENODE -cat hdfs:///dir0/data15bytes hdfs:///dir0/data30bytes hdfs:///dir0/data60bytes hdfs:///dir0/data120bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cat: Test for hdfs:// path - contents of file that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -cat hdfs:///file</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cat: File does not exist: hdfs:/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cat: Test for hdfs:// path - contents of directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -cat hdfs:///dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cat: Source must be a file.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for cat using Namenode's path -->
+    <test> <!-- TESTED -->
+      <description>cat: Test for Namenode's path - content of file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/data15bytes</command>
+        <command>-fs NAMENODE -cat NAMENODE/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test>  <!-- TESTED -->
+      <description>cat: Test for Namenode's path - contents of files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes NAMENODE/dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes NAMENODE/dir0/data120bytes</command>
+        <command>-fs NAMENODE -cat NAMENODE/dir0/data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test>  <!-- TESTED -->
+      <description>cat: Test for Namenode's path - contents of files without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes NAMENODE/dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes NAMENODE/dir0/data120bytes</command>
+        <command>-fs NAMENODE -cat NAMENODE/dir0/data15bytes NAMENODE/dir0/data30bytes NAMENODE/dir0/data60bytes NAMENODE/dir0/data120bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>12345678901234.*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cat: Test for Namenode's path - contents of file that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -cat NAMENODE/file</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cat: File does not exist: hdfs://localhost[.a-z]*:[0-9]+/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>cat: Test for Namenode's path - contents of directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -cat NAMENODE/dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^cat: Source must be a file.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for copyToLocal -->
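+    <!-- copyToLocal copies a file from the file system to the local file system; a missing source is reported as an error. -->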
+    <test> <!-- TESTED -->
+      <description>copyToLocal: non existent relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyToLocal file CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>copyToLocal: null</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyToLocal: non existent absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyToLocal /user/file CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>copyToLocal: null</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>copyToLocal: Test for hdfs:// path - non existent file/directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyToLocal hdfs:///file CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>copyToLocal: null</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>copyToLocal: Test for Namenode's path - non existent file/directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -copyToLocal NAMENODE/file CLITEST_DATA/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>copyToLocal: null</expected-output>
+        </comparator>
+     </comparators>
+    </test>
+    
+    <!-- Tests for moveToLocal -->
+    <!-- Not yet implemented -->
+
+    <!-- Tests for mkdir -->
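+    <!-- mkdir creates the named directory; it reports an error if a file or directory with that name already exists. -->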
+    <test> <!-- TESTED -->
+      <description>mkdir: creating directory (absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -dus /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mkdir: creating directory (relative path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0 </command>
+        <command>-fs NAMENODE -dus dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0(|\t)*0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mkdir: creating many directories (absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0 </command>
+        <command>-fs NAMENODE -mkdir /dir1 </command>        
+        <command>-fs NAMENODE -mkdir /dir2 </command>
+        <command>-fs NAMENODE -mkdir /dir3 </command>
+        <command>-fs NAMENODE -dus /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir1(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir2(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir3(|\t)*0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mkdir: creating many directories (relative path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0 </command>
+        <command>-fs NAMENODE -mkdir dir1 </command>
+        <command>-fs NAMENODE -mkdir dir2 </command>
+        <command>-fs NAMENODE -mkdir dir3 </command>
+        <command>-fs NAMENODE -dus dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir2(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir3(|\t)*0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mkdir: creating a directory with the name of an already existing directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>mkdir: cannot create directory /dir0: File exists</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mkdir: creating a directory with the name of an already existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -mkdir data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>mkdir: data15bytes exists but is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <test> <!-- TESTED -->
+      <description>mkdir: Test for hdfs:// path - creating directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -dus hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+     
+    <test> <!-- TESTED -->
+      <description>mkdir: Test for hdfs:// path - creating many directories</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0 hdfs:///dir1 hdfs:///dir2 hdfs:///dir3 </command>
+        <command>-fs NAMENODE -dus hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir1(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir2(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir3(|\t)*0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mkdir: Test for hdfs:// path - creating a directory with the name of an already existing directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>mkdir: cannot create directory hdfs:///dir0: File exists</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mkdir: Test for hdfs:// path - creating a directory with the name of an already existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///data15bytes</command>
+        <command>-fs NAMENODE -mkdir hdfs:///data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>mkdir: hdfs:///data15bytes exists but is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mkdir: Test for Namenode's path - creating directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -dus NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mkdir: Test for Namenode's path - creating many directories</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0 NAMENODE/dir1 NAMENODE/dir2 NAMENODE/dir3</command>
+        <command>-fs NAMENODE -dus NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir0(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir1(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir2(|\t)*0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^hdfs://localhost[.a-z]*:[0-9]*/dir3(|\t)*0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mkdir: Test for NAMENODE path - creating a directory with the name of an already existing directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>mkdir: cannot create directory hdfs://localhost[.a-z]*:[0-9]+/dir0: File exists</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>mkdir: Test for Namenode's path - creating a directory with the name of an already existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/data15bytes</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>mkdir: hdfs://localhost[.a-z]*:[0-9]+/data15bytes exists but is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <!--Tests for setrep-->
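+    <!-- setrep sets the target replication factor of a file; with -R it is applied recursively to every file under the given directory. -->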
+    <test> <!-- TESTED -->
+      <description>setrep: existent file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -setrep 2 /dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>setrep: existent file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file0</command>
+        <command>-fs NAMENODE -setrep 2 file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>setrep: existent directory (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/file1</command>
+        <command>-fs NAMENODE -setrep -R 2 /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>setrep: existent directory (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/file1</command>
+        <command>-fs NAMENODE -setrep -R 2 dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>setrep: non existent file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -setrep 2 /dir0/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^setrep: File does not exist: /dir0/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>setrep: non existent file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -setrep 2 file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^setrep: File does not exist: file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>setrep: Test for hdfs:// path - existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -setrep 2 hdfs:///dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>setrep: Test for hdfs:// path - existent directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file1</command>
+        <command>-fs NAMENODE -setrep -R 2 hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>setrep: Test for hdfs:// path - non existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -setrep 2 hdfs:///dir0/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^setrep: File does not exist: hdfs:/dir0/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>setrep: Test for Namenode's path - existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -setrep 2 NAMENODE/dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>setrep: Test for Namenode's path - existent directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file1</command>
+        <command>-fs NAMENODE -setrep -R 2 NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: hdfs://localhost[.a-z]*:[0-9]*/dir0/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>setrep: Test for Namenode's path - non existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -setrep 2 NAMENODE/dir0/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^setrep: File does not exist: hdfs://localhost[.a-z]*:[0-9]+/dir0/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <!-- Tests for touchz-->
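+    <!-- touchz creates a new zero-length file; it reports an error if the target already exists with non-zero length. -->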
+    <test> <!-- TESTED -->
+      <description>touchz: touching file (absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /user/file0</command>
+        <command>-fs NAMENODE -du /user/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>touchz: touching file (relative path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file0 </command>
+        <command>-fs NAMENODE -du file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>touchz: touching many files </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file0 file1 file2</command>
+        <command>-fs NAMENODE -du file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 3 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>touchz: touching already existing file </description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -touchz data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>touchz: data15bytes must be a zero-length file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>touchz: Test for hdfs:// path - touching file</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///user/file0</command>
+        <command>-fs NAMENODE -du hdfs:///user/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>touchz: Test for hdfs:// path - touching many files </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file0 hdfs:///file1 hdfs:///file2</command>
+        <command>-fs NAMENODE -du hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 3 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]+/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]+/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]+/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>touchz: Test for hdfs:// path - touching already existing file </description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///data15bytes</command>
+        <command>-fs NAMENODE -touchz hdfs:///data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>touchz: hdfs:///data15bytes must be a zero-length file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>touchz: Test for Namenode's path - touching file</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/user/file0</command>
+        <command>-fs NAMENODE -du NAMENODE/user/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]*/user/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>touchz: Test for Namenode path - touching many files </description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file0 NAMENODE/file1 NAMENODE/file2</command>
+        <command>-fs NAMENODE -du NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 3 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]+/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]+/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^0( |\t)*hdfs://localhost[.a-z]*:[0-9]+/file2</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>touchz: Test for Namenode's path - touching already existing file </description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/data15bytes</command>
+        <command>-fs NAMENODE -touchz NAMENODE/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>touchz: hdfs://localhost[.a-z]*:[0-9]+/data15bytes must be a zero-length file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <!--Tests for test-->
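+    <!-- The -test flags exercised below: -z checks for a zero-length file and -d checks
+         for a directory; a missing path is reported as "File does not exist". -->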
+    <test> <!-- TESTED -->
+      <description>test: non existent file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -test -z /dir0/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^test: File does not exist: /dir0/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>test: non existent file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -test -z file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^test: File does not exist: file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>test: non existent directory (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -test -d /dir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^test: File does not exist: /dir</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>test: non existent directory (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -test -d dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^test: File does not exist: dir0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <test> <!-- TESTED -->
+      <description>test: Test for hdfs:// path - non existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -test -z hdfs:///dir0/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^test: File does not exist: hdfs:/dir0/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>test: Test for hdfs:// path - non existent directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -test -d hdfs:///dir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^test: File does not exist: hdfs:/dir</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>test: Test for Namenode's path - non existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -test -z NAMENODE/dir0/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^test: File does not exist: hdfs://localhost[.a-z]*:[0-9]+/dir0/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>test: Test for Namenode's path - non existent directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -test -d NAMENODE/dir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^test: File does not exist: hdfs://localhost[.a-z]*:[0-9]+/dir</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <!--Tests for stat -->
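+    <!-- Format specifiers used with -stat below: %n is the file name, %b the length in
+         bytes and %o the block size; directories report 0 for both %b and %o, hence the
+         "dirtest-0-0" expectations. -->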
+    <test> <!-- TESTED -->
+      <description>stat: statistics about file(absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes /data60bytes</command>
+        <command>-fs NAMENODE -stat "%n-%b" /data60bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /data60bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data60bytes-60</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: statistics about file(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes data60bytes</command>
+        <command>-fs NAMENODE -stat "%n-%b" data60bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data60bytes-60</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: statistics about directory(absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dirtest</command>
+        <command>-fs NAMENODE -stat "%n-%b-%o" /dirtest</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dirtest</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>dirtest-0-0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: statistics about directory(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dirtest</command>
+        <command>-fs NAMENODE -stat "%n-%b-%o" dirtest</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>dirtest-0-0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: statistics about files (absolute path) using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes /dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes /dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes /dir0/data120bytes</command>
+        <command>-fs NAMENODE -mkdir /dir0/datadir</command>
+        <command>-fs NAMENODE -stat "%n-%b" /dir0/data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data15bytes-15</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data30bytes-30</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data60bytes-60</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data120bytes-120</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>datadir-0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: statistics about files (relative path) using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes data120bytes</command>
+        <command>-fs NAMENODE -mkdir datadir</command>
+        <command>-fs NAMENODE -stat "%n-%b" data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data15bytes-15</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data30bytes-30</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data60bytes-60</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data120bytes-120</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>datadir-0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: statistics about file or directory(absolute path) that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -stat /file</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^stat: cannot stat `/file': No such file or directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: statistics about file or directory(relative path) that does not exist </description>
+      <test-commands>
+        <command>-fs NAMENODE -stat file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^stat: cannot stat `file1': No such file or directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: Test for hdfs:// path - statistics about file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///data60bytes</command>
+        <command>-fs NAMENODE -stat "%n-%b" hdfs:///data60bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///data60bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data60bytes-60</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: Test for hdfs:// path - statistics about directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dirtest</command>
+        <command>-fs NAMENODE -stat "%n-%b-%o" hdfs:///dirtest</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dirtest</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>dirtest-0-0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: Test for hdfs:// path - statistics about files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes hdfs:///dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes hdfs:///dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes hdfs:///dir0/data120bytes</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/datadir</command>
+        <command>-fs NAMENODE -stat "%n-%b" hdfs:///dir0/data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data15bytes-15</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data30bytes-30</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data60bytes-60</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data120bytes-120</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>datadir-0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: Test for hdfs:// path - statistics about file or directory that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -stat hdfs:///file</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^stat: cannot stat `hdfs:///file': No such file or directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: Test for Namenode's path - statistics about file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes NAMENODE/data60bytes</command>
+        <command>-fs NAMENODE -stat "%n-%b" NAMENODE/data60bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/data60bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data60bytes-60</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: Test for Namenode's path - statistics about directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dirtest</command>
+        <command>-fs NAMENODE -stat "%n-%b-%o" NAMENODE/dirtest</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dirtest</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>dirtest-0-0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: Test for Namenode's path - statistics about files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/dir0/data15bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data30bytes NAMENODE/dir0/data30bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data60bytes NAMENODE/dir0/data60bytes</command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data120bytes NAMENODE/dir0/data120bytes</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/datadir</command>
+        <command>-fs NAMENODE -stat "%n-%b" NAMENODE/dir0/data*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data15bytes-15</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data30bytes-30</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data60bytes-60</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>data120bytes-120</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>datadir-0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>stat: Test for Namenode's path - statistics about file or directory that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -stat NAMENODE/file</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^stat: cannot stat `hdfs://localhost[.a-z]*:[0-9]+/file': No such file or directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+  
+    <!-- Tests for tail -->
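+    <!-- -tail prints the trailing end of a file (roughly the last kilobyte); a directory
+         argument fails with "Source must be a file", and a wildcard argument is not
+         expanded, so it is reported as "File does not exist", which the glob cases below
+         check. -->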
+    <test> <!-- TESTED -->
+      <description>tail: contents of file(absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /data15bytes</command>
+        <command>-fs NAMENODE -tail /data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /data15bytes</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!--TESTED-->
+      <description>tail: contents of file(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -tail data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: contents of files(absolute path) using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -tail /file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: File does not exist: /file\*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: contents of files(relative path) using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -tail file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: File does not exist: file\*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: contents of file(absolute path) that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -tail /file</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: File does not exist: /file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: contents of file(relative path) that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -tail file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: File does not exist: file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: contents of directory(absolute path) </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -tail /dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: Source must be a file.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: contents of directory(relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir1</command>
+        <command>-fs NAMENODE -tail dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: Source must be a file.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <test> <!-- TESTED -->
+      <description>tail: Test for hdfs:// path - contents of file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes hdfs:///data15bytes</command>
+        <command>-fs NAMENODE -tail hdfs:///data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: Test for hdfs:// path - contents of files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -tail hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: File does not exist: hdfs:/file\*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: Test for hdfs:// path - contents of file that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -tail hdfs:///file</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: File does not exist: hdfs:/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: Test for hdfs:// path - contents of directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -tail hdfs:///dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: Source must be a file.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: Test for Namenode's path - contents of file</description>
+      <test-commands>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes NAMENODE/data15bytes</command>
+        <command>-fs NAMENODE -tail NAMENODE/data15bytes</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>12345678901234</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <test> <!-- TESTED -->
+      <description>tail: Test for Namenode's path - contents of files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -tail NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: File does not exist: hdfs://localhost[.a-z]*:[0-9]+/file\*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: Test for Namenode's path - contents of file that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -tail NAMENODE/file</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: File does not exist: hdfs://localhost[.a-z]*:[0-9]+/file</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!-- TESTED -->
+      <description>tail: Test for Namenode's path - contents of directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -tail NAMENODE/dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^tail: Source must be a file.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <!-- Tests for count -->
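+    <!-- -count reports DIR_COUNT, FILE_COUNT, CONTENT_SIZE and the path; with -q the
+         QUOTA, REMAINING_QUOTA, SPACE_QUOTA and REMAINING_SPACE_QUOTA columns come first
+         ("none"/"inf" when no quota is set). The regular expressions below match these
+         columns in order. -->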
+    <test> <!-- TESTED -->
+      <description>count: file using absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -count /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: file using relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -count file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: directory using absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -count /dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: directory using relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir1</command>
+        <command>-fs NAMENODE -count dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: absolute path to file/directory that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -count /file1 </command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Can not find listing for /file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: relative path to file/directory that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -count file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Can not find listing for file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: absolute path to multiple files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -count /file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: relative path to multiple files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -count file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: absolute path to multiple files without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -count /file1 /file2 /file3 /file4</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: relative path to multiple files without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -count file1 file2 file3 file4</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: absolute path to multiple directories using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir3</command>
+        <command>-fs NAMENODE -mkdir /dir4</command>
+        <command>-fs NAMENODE -count /dir* </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: relative path to multiple directories using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir1</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir3</command>
+        <command>-fs NAMENODE -mkdir dir4</command>
+        <command>-fs NAMENODE -count dir* </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: absolute path to multiple directories without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir3</command>
+        <command>-fs NAMENODE -mkdir /dir4</command>
+        <command>-fs NAMENODE -count /dir1 /dir2 /dir3 /dir4 </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: relative path to multiple directories without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir1</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir3</command>
+        <command>-fs NAMENODE -mkdir dir4</command>
+        <command>-fs NAMENODE -count dir1 dir2 dir3 dir4 </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: file using absolute path with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -count -q /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: file using relative path with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -count -q file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
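+    <!-- In the -q directory cases below, a name quota of 10 leaves 9 remaining because the
+         directory itself counts as one entry; the 1m space quota is shown as 1048576 bytes,
+         all of it remaining while the directory is empty. -->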
+    <test> <!-- TESTED -->
+      <description>count: directory using absolute path with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 /dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m /dir1 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q /dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: directory using relative path with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir1</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m dir1 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: absolute path to file/directory that does not exist with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -count -q /file1 </command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Can not find listing for /file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: relative path to file/directory that does not exist with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -count -q file1</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Can not find listing for file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: absolute path to multiple files using globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -count -q /file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: relative path to multiple files using globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -count -q file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <test> <!-- TESTED -->
+      <description>count: absolute path to multiple files without globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -count -q /file1 /file2 /file3 /file4</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: relative path to multiple files without globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -count -q file1 file2 file3 file4</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: absolute path to multiple directories using globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir3</command>
+        <command>-fs NAMENODE -mkdir /dir4</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 /dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m /dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 /dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m /dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 /dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m /dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 /dir4 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m /dir4 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q /dir* </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: relative path to multiple directories using globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir1</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir3</command>
+        <command>-fs NAMENODE -mkdir dir4</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 dir4 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m dir4 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q dir* </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: absolute path to multiple directories without globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir3</command>
+        <command>-fs NAMENODE -mkdir /dir4</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 /dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m /dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 /dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m /dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 /dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m /dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 /dir4 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m /dir4 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q /dir1 /dir2 /dir3 /dir4 </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: relative path to multiple directories without globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir1</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir3</command>
+        <command>-fs NAMENODE -mkdir dir4</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 dir4 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m dir4 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q dir1 dir2 dir3 dir4 </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/user/[a-z]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
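+    <!-- The tests below repeat the count checks with explicit hdfs:// URIs; without -q, count reports DIR_COUNT, FILE_COUNT, CONTENT_SIZE and PATHNAME. -->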
+    <test> <!-- TESTED -->
+      <description>count: Test for file using hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -count hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for directory using hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -count hdfs:///dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for hdfs:// path - file/directory that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -count hdfs:///file1 </command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Can not find listing for hdfs:///file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for hdfs:// path - multiple files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -count hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for hdfs:// path - multiple files without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -count hdfs:///file1 hdfs:///file2 hdfs:///file3 hdfs:///file4</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for hdfs:// path - multiple directories using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir3</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir4</command>
+        <command>-fs NAMENODE -count hdfs:///dir* </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for hdfs:// path - multiple directories without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir3</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir4</command>
+        <command>-fs NAMENODE -count hdfs:///dir1 hdfs:///dir2 hdfs:///dir3 hdfs:///dir4 </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for file using hdfs:// path with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -count -q hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for directory using hdfs:// path with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 hdfs:///dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m hdfs:///dir1 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q hdfs:///dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for hdfs:// path - file/directory that does not exist with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -count -q hdfs:///file1 </command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Can not find listing for hdfs:///file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for hdfs:// path - multiple files using globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -count -q hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for hdfs:// path - multiple files without globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -count -q hdfs:///file1 hdfs:///file2 hdfs:///file3 hdfs:///file4</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for hdfs:// path - multiple directories using globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir3</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir4</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 hdfs:///dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m hdfs:///dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 hdfs:///dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m hdfs:///dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 hdfs:///dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m hdfs:///dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 hdfs:///dir4 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m hdfs:///dir4 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q hdfs:///dir* </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for hdfs:// path - multiple directories without globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir1</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir3</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir4</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 hdfs:///dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m hdfs:///dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 hdfs:///dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m hdfs:///dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 hdfs:///dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m hdfs:///dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 hdfs:///dir4 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m hdfs:///dir4 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q hdfs:///dir1 hdfs:///dir2 hdfs:///dir3 hdfs:///dir4 </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
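+    <!-- The tests below use the NAMENODE token as the path prefix; the CLI test driver expands it to the namenode's hdfs://host:port URI before running each command, so the expected outputs still match hdfs://localhost paths. -->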
+    <test> <!-- TESTED -->
+      <description>count: Test for file using Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -count NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for directory using Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -count NAMENODE/dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for Namenode's path - file/directory that does not exist</description>
+      <test-commands>
+        <command>-fs NAMENODE -count NAMENODE/file1 </command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Can not find listing for hdfs://localhost[.a-z]*:[0-9]+/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for Namenode's path - multiple files using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -count NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for Namenode's path - multiple files without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -count NAMENODE/file1 NAMENODE/file2 NAMENODE/file3 NAMENODE/file4</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for Namenode's path - multiple directories using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir3</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir4</command>
+        <command>-fs NAMENODE -count NAMENODE/dir* </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for Namenode's path - multiple directories without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir3</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir4</command>
+        <command>-fs NAMENODE -count NAMENODE/dir1 NAMENODE/dir2 NAMENODE/dir3 NAMENODE/dir4 </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for file using Namenode's path with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -count -q NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for directory using Namenode's path with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 NAMENODE/dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m NAMENODE/dir1 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q NAMENODE/dir1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for Namenode's path - file/directory that does not exist with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -count -q NAMENODE/file1 </command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Can not find listing for hdfs://localhost[.a-z]*:[0-9]+/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for Namenode's path - multiple files using globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -count -q NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for Namenode's path - multiple files without globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -count -q NAMENODE/file1 NAMENODE/file2 NAMENODE/file3 NAMENODE/file4</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*none( |\t)*inf( |\t)*none( |\t)*inf( |\t)*0( |\t)*1( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for Namenode's path - multiple directories using globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir3</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir4</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 NAMENODE/dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m NAMENODE/dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 NAMENODE/dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m NAMENODE/dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 NAMENODE/dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m NAMENODE/dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 NAMENODE/dir4 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m NAMENODE/dir4 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q NAMENODE/dir* </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>count: Test for Namenode's path - multiple directories without globbing with -q option</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir1</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir3</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir4</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 NAMENODE/dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m NAMENODE/dir1 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 NAMENODE/dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m NAMENODE/dir2 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 NAMENODE/dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m NAMENODE/dir3 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 10 NAMENODE/dir4 </dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1m NAMENODE/dir4 </dfs-admin-command>
+        <command>-fs NAMENODE -count -q NAMENODE/dir1 NAMENODE/dir2 NAMENODE/dir3 NAMENODE/dir4 </command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>( |\t)*10( |\t)*9( |\t)*1048576( |\t)*1048576( |\t)*1( |\t)*0( |\t)*0 hdfs://localhost[.a-z]*:[0-9]*/dir4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for chmod -->
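+    <!-- The chmod tests verify mode changes through ls/lsr output, whose fields are: permissions, replication factor (or - for a directory), owner, group, size, modification date, modification time and path. -->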
+    <test> <!-- TESTED -->
+      <description>chmod: change permission (octal mode) of file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -chmod 777 /file1</command>
+        <command>-fs NAMENODE -ls /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission (octal mode) of file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -chmod 666 file1</command>
+        <command>-fs NAMENODE -ls file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission (octal mode) of directory in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 /dir0/dir1</command>
+        <command>-fs NAMENODE -lsr /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission (octal mode) of directory in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 dir0/dir1</command>
+        <command>-fs NAMENODE -lsr dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission (normal mode) of file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -chmod a+rw /file1</command>
+        <command>-fs NAMENODE -ls /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission (normal mode) of file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -chmod a+rw file1</command>
+        <command>-fs NAMENODE -ls file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission (normal mode) of directory in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx /dir0/dir1</command>
+        <command>-fs NAMENODE -lsr /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission (normal mode) of directory in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx dir0/dir1</command>
+        <command>-fs NAMENODE -lsr dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission (octal mode) of directory in absolute path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 /dir0/dir1</command>
+        <command>-fs NAMENODE -lsr /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission (octal mode) of directory in relative path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 dir0/dir1</command>
+        <command>-fs NAMENODE -lsr dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission (normal mode) of directory in absolute path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx /dir0/dir1</command>
+        <command>-fs NAMENODE -lsr /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of directory in relative path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx dir0/dir1</command>
+        <command>-fs NAMENODE -lsr dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
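+    <!-- The next four tests run chmod (octal and normal mode, absolute and relative path)
+         against a path that was never created and expect the shell's
+         "could not get status ... File does not exist" message. -->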
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of non-existent file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chmod 777 /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: could not get status for '/file1': File does not exist: /file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of non-existent file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chmod 666 file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: could not get status for 'file1': File does not exist: file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of non-existent file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chmod a+rw /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: could not get status for '/file1': File does not exist: /file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of non-existent file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chmod a+rw file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: could not get status for 'file1': File does not exist: file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
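+    <!-- Globbing variants: chmod is given a wildcard pattern, and one comparator per matched
+         file checks the resulting mode in the -lsr output. -->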
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple files in absolute path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -chmod 777 /file* </command>
+        <command>-fs NAMENODE -lsr /file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple files in relative path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -chmod 777 file* </command>
+        <command>-fs NAMENODE -lsr file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
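+    <!-- Without -R only the matched directories themselves are changed; -lsr lists their
+         contents, so these comparators still expect the default permissions
+         (drwxr-xr-x for directories, -rw-r--r-- for files created by -touchz). -->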
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in absolute path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 /dir*</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in relative path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 dir*</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
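+    <!-- The "without globbing" variants pass the same paths as explicit arguments instead of
+         a wildcard; the expected output is identical to the globbing cases above. -->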
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple files in absolute path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -chmod 777 /file1 /file2 /file3 /file4 </command>
+        <command>-fs NAMENODE -lsr /file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple files in relative path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -chmod 777 file1 file2 file3 file4 </command>
+        <command>-fs NAMENODE -lsr file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in absolute path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 /dir0 /dir2</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in relative path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 dir0 dir2</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
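+    <!-- The following tests repeat the globbing and multi-argument cases with normal
+         (symbolic) modes (a+rw, a+rwx); the expected permissions match the corresponding
+         octal cases. -->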
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple files in absolute path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -chmod a+rw /file* </command>
+        <command>-fs NAMENODE -lsr /file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple files in relative path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -chmod a+rw file* </command>
+        <command>-fs NAMENODE -lsr file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in absolute path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx /dir*</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in relative path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx dir*</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple files in absolute path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -chmod a+rw /file1 /file2 /file3 /file4 </command>
+        <command>-fs NAMENODE -lsr /file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple files in relative path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -chmod a+rw file1 file2 file3 file4 </command>
+        <command>-fs NAMENODE -lsr file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in absolute path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx /dir0 /dir2</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in relative path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx dir0 dir2</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
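+    <!-- The remaining tests combine -R with multiple directory arguments (globbed and
+         explicit, absolute and relative), so the comparators expect changed permissions on
+         every listed child rather than only on the top-level directories. -->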
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in absolute path recursively using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 /dir*</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in relative path recursively using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 dir*</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in absolute path recursively without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 /dir0 /dir2</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in relative path recursively without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 dir0 dir2</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in absolute path recursively using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx /dir*</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in relative path recursively using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx dir*</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in absolute path recursively without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx /dir0 /dir2</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in relative path recursively without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx dir0 dir2</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
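+    <!-- The next group checks that malformed mode arguments are rejected: the
+         expected text mirrors the "mode '...' does not match the expected
+         pattern" error the shell reports for an invalid mode string. -->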
+    <test> <!-- TESTED -->
+      <description>chmod: invalid value in octal mode of file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -chmod 999 /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: chmod : mode '999' does not match the expected pattern.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: invalid value in octal mode of file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -chmod 999 file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: chmod : mode '999' does not match the expected pattern.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: invalid value in normal mode of file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -chmod r+def /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: chmod : mode \'r\+def\' does not match the expected pattern.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: invalid value in normal mode of file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -chmod r+def file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: chmod : mode \'r\+def\' does not match the expected pattern.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
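+    <!-- The following tests repeat the chmod cases against fully qualified
+         hdfs:/// URIs instead of plain absolute or relative paths. -->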
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -chmod 777 hdfs:///file1</command>
+        <command>-fs NAMENODE -ls hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of directory in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -chmod a+rw hdfs:///file1</command>
+        <command>-fs NAMENODE -ls hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of directory in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
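+    <!-- In the two recursive hdfs:// tests below only the subtree under dir1 is
+         expected to change; /dir0/file0 keeps the -rw-r--r-- permissions it was
+         created with by -touchz. -->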
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of directory in hdfs:// path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of directory in hdfs:// path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
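+    <!-- For a missing target the error message reports the path in its
+         normalized single-slash form (hdfs:/file1), so the patterns below match
+         that form rather than the hdfs:/// form used on the command line. -->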
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of non existent file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chmod 777 hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: could not get status for 'hdfs:/file1': File does not exist: hdfs:/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of non existent file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chmod a+rw hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: could not get status for 'hdfs:/file1': File does not exist: hdfs:/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple files in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -chmod 777 hdfs:///file*</command>
+        <command>-fs NAMENODE -lsr hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 hdfs:///dir*</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple files in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -chmod 777 hdfs:///file1 hdfs:///file2 hdfs:///file3 hdfs:///file4</command>
+        <command>-fs NAMENODE -lsr hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 hdfs:///dir0 hdfs:///dir2</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple files in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -chmod a+rw hdfs:///file*</command>
+        <command>-fs NAMENODE -lsr hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx hdfs:///dir*</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple files in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -chmod a+rw hdfs:///file1 hdfs:///file2 hdfs:///file3 hdfs:///file4</command>
+        <command>-fs NAMENODE -lsr hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx hdfs:///dir0 hdfs:///dir2</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in hdfs:// path recursively using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 hdfs:///dir*</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in hdfs:// path recursively without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 hdfs:///dir0 hdfs:///dir2</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in hdfs:// path recursively using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx hdfs:///dir*</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in hdfs:// path recursively without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx hdfs:///dir0 hdfs:///dir2</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
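+    <!-- In the two tests below, 999 is not a valid octal mode (each permission digit must be 0-7)
+         and 'rdef' is not a valid symbolic mode, so chmod is expected to reject both with a
+         "does not match the expected pattern" error. -->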
+    <test> <!-- TESTED -->
+      <description>chmod: invalid value in octal mode of file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -chmod 999 hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: chmod : mode '999' does not match the expected pattern.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: invalid value in normal mode of file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -chmod rdef hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: chmod : mode \'rdef\' does not match the expected pattern.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for chmod -->
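+    <!-- The chmod tests below repeat the scenarios above, but address files and directories
+         through the NAMENODE token, which the test harness expands to the full namenode URI
+         (for example hdfs://localhost:port), rather than the bare hdfs:/// scheme. -->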
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -chmod 777 NAMENODE/file1</command>
+        <command>-fs NAMENODE -ls NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of directory in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -chmod a+rw NAMENODE/file1</command>
+        <command>-fs NAMENODE -ls NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of directory in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of directory in Namenode's path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of directory in Namenode's path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of non existent file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chmod 777 NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: could not get status for 'hdfs://localhost[.a-z]*:[0-9]+/file1': File does not exist: hdfs://localhost[.a-z]*:[0-9]+/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of non existent file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chmod a+rw NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: could not get status for 'hdfs://localhost[.a-z]*:[0-9]+/file1': File does not exist: hdfs://localhost[.a-z]*:[0-9]+/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple files in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -chmod 777 NAMENODE/file* </command>
+        <command>-fs NAMENODE -lsr NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 NAMENODE/dir*</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple files in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -chmod 777 NAMENODE/file1 NAMENODE/file2 NAMENODE/file3 NAMENODE/file4 </command>
+        <command>-fs NAMENODE -lsr NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod 777 NAMENODE/dir0 NAMENODE/dir2</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple files in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -chmod a+rw NAMENODE/file* </command>
+        <command>-fs NAMENODE -lsr NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx NAMENODE/dir*</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple files in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -chmod a+rw NAMENODE/file1 NAMENODE/file2 NAMENODE/file3 NAMENODE/file4 </command>
+        <command>-fs NAMENODE -lsr NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod a+rwx NAMENODE/dir0 NAMENODE/dir2</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in Namenode's path recursively using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 NAMENODE/dir*</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(octal mode) of multiple directories in Namenode's path recursively without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R 777 NAMENODE/dir0 NAMENODE/dir2</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in Namenode's path recursively using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx NAMENODE/dir*</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: change permission(normal mode) of multiple directories in Namenode's path recursively without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chmod -R a+rwx NAMENODE/dir0 NAMENODE/dir2</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxrwxrwx( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-rw-rw-( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: invalid value in octal mode of file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -chmod 999 NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: chmod : mode '999' does not match the expected pattern.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chmod: invalid value in normal mode of file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -chmod rdef NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chmod: chmod : mode \'rdef\' does not match the expected pattern.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for chown -->
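+    <!-- Each chown test below follows the same pattern as the chmod tests: create the targets
+         with -mkdir/-touchz, run -chown owner:group on the path(s), verify the owner and group
+         columns of the -ls/-lsr listing with a RegexpComparator, then remove the targets in the
+         cleanup commands. -->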
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -chown newowner:newgroup hdfs:///file1</command>
+        <command>-fs NAMENODE -ls hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of directory in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of directory in hdfs:// path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of non existent file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chown newowner:newgroup hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: could not get status for 'hdfs:/file1': File does not exist: hdfs:/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple files in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -chown newowner:newgroup hdfs:///file* </command>
+        <command>-fs NAMENODE -lsr hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup hdfs:///dir*</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple files in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -chown newowner:newgroup hdfs:///file1 hdfs:///file2 hdfs:///file3 hdfs:///file4 </command>
+        <command>-fs NAMENODE -lsr hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup hdfs:///dir0 hdfs:///dir2</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories recursively in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup hdfs:///dir*</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories recursively in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup hdfs:///dir0 hdfs:///dir2</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: invalid option for owner of file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -chown %:newgroup hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: '%:newgroup' does not match expected pattern for \[owner\]\[:group\].</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: invalid option for group of file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -chown newowner:% hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: 'newowner:%' does not match expected pattern for \[owner\]\[:group\].</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for chown: absolute, relative, and Namenode paths -->
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -chown newowner:newgroup /file1</command>
+        <command>-fs NAMENODE -ls /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -chown newowner:newgroup file1</command>
+        <command>-fs NAMENODE -ls file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of directory in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup /dir0/dir1</command>
+        <command>-fs NAMENODE -lsr /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of directory in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup dir0/dir1</command>
+        <command>-fs NAMENODE -lsr dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of directory in absolute path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup /dir0/dir1</command>
+        <command>-fs NAMENODE -lsr /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of directory in relative path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup dir0/dir1</command>
+        <command>-fs NAMENODE -lsr dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of non-existent file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chown newowner:newgroup /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: could not get status for '/file1': File does not exist: /file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of non-existent file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chown newowner:newgroup file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: could not get status for 'file1': File does not exist: file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple files in absolute path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -chown newowner:newgroup /file* </command>
+        <command>-fs NAMENODE -lsr /file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple files in relative path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -chown newowner:newgroup file* </command>
+        <command>-fs NAMENODE -lsr file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories in absolute path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup /dir*</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories in relative path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup dir*</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple files in absolute path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -chown newowner:newgroup /file1 /file2 /file3 /file4 </command>
+        <command>-fs NAMENODE -lsr /file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple files in relative path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -chown newowner:newgroup file1 file2 file3 file4 </command>
+        <command>-fs NAMENODE -lsr file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories in absolute path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup /dir0 /dir2</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories in relative path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup dir0 dir2</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories recursively in absolute path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup /dir*</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories recursively in relative path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup dir*</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories recursively in absolute path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup /dir0 /dir2</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories recursively in relative path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup dir0 dir2</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: invalid option for owner of file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -chown %:newgroup /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: '%:newgroup' does not match expected pattern for \[owner\]\[:group\].</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: invalid option for owner of file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -chown %:newgroup file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: '%:newgroup' does not match expected pattern for \[owner\]\[:group\].</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: invalid option for group of file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -chown newowner:% /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: 'newowner:%' does not match expected pattern for \[owner\]\[:group\].</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: invalid option for group of file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -chown newowner:% file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: 'newowner:%' does not match expected pattern for \[owner\]\[:group\].</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
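+    <!-- In the tests below, the NAMENODE token inside a path argument is expanded by the
+         CLI test driver (TestHDFSCLI) to the namenode URI of the test cluster, so a path
+         such as NAMENODE/file1 resolves to an absolute path on the default filesystem. -->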
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -chown newowner:newgroup NAMENODE/file1</command>
+        <command>-fs NAMENODE -ls NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of directory in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
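+    <!-- Without -R, only the named directory (dir0/dir1) is expected to change to
+         newowner:newgroup; the comparators above still expect the default owner and
+         supergroup on the files beneath it and on dir0/file0. The recursive variant
+         follows. -->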
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of directory in Namenode's path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of non-existent file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chown newowner:newgroup NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: could not get status for 'hdfs://localhost[.a-z]*:[0-9]+/file1': File does not exist: hdfs://localhost[.a-z]*:[0-9]+/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple files in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -chown newowner:newgroup NAMENODE/file* </command>
+        <command>-fs NAMENODE -lsr NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup NAMENODE/dir*</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple files in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -chown newowner:newgroup NAMENODE/file1 NAMENODE/file2 NAMENODE/file3 NAMENODE/file4 </command>
+        <command>-fs NAMENODE -lsr NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown newowner:newgroup NAMENODE/dir0 NAMENODE/dir2</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories recursively in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup NAMENODE/dir*</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chown: change ownership of multiple directories recursively in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chown -R newowner:newgroup NAMENODE/dir0 NAMENODE/dir2</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*newowner( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chown: invalid option for owner of file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -chown %:newgroup NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: '%:newgroup' does not match expected pattern for \[owner\]\[:group\].</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chown: invalid option for group of file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -chown newowner:% NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chown: 'newowner:%' does not match expected pattern for \[owner\]\[:group\].</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+   <!-- Tests for chgrp -->
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -chgrp newgroup /file1</command>
+        <command>-fs NAMENODE -ls /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -chgrp newgroup file1</command>
+        <command>-fs NAMENODE -ls file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of directory in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup /dir0/dir1</command>
+        <command>-fs NAMENODE -lsr /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of directory in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup dir0/dir1</command>
+        <command>-fs NAMENODE -lsr dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of directory in absolute path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup /dir0/dir1</command>
+        <command>-fs NAMENODE -lsr /dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of directory in relative path recursively</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup dir0/dir1</command>
+        <command>-fs NAMENODE -lsr dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of non-existent file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chgrp newgroup /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chgrp: could not get status for '/file1': File does not exist: /file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of non-existent file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chgrp newgroup file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chgrp: could not get status for 'file1': File does not exist: file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple files in absolute path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -chgrp newgroup /file* </command>
+        <command>-fs NAMENODE -lsr /file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple files in relative path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -chgrp newgroup file* </command>
+        <command>-fs NAMENODE -lsr file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories in absolute path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup /dir*</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories in relative path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup dir*</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple files in absolute path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -touchz /file2</command>
+        <command>-fs NAMENODE -touchz /file3</command>
+        <command>-fs NAMENODE -touchz /file4</command>
+        <command>-fs NAMENODE -chgrp newgroup /file1 /file2 /file3 /file4 </command>
+        <command>-fs NAMENODE -lsr /file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple files in relative path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -touchz file2</command>
+        <command>-fs NAMENODE -touchz file3</command>
+        <command>-fs NAMENODE -touchz file4</command>
+        <command>-fs NAMENODE -chgrp newgroup file1 file2 file3 file4 </command>
+        <command>-fs NAMENODE -lsr file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories in absolute path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup /dir0 /dir2</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories in relative path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup dir0 dir2</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories recursively in absolute path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup /dir*</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories recursively in relative path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup dir*</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories recursively in absolute path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir0</command>
+        <command>-fs NAMENODE -mkdir /dir0/dir1</command>
+        <command>-fs NAMENODE -touchz /dir0/file0</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir2/dir1</command>
+        <command>-fs NAMENODE -touchz /dir2/file0</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz /dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup /dir0 /dir2</command>
+        <command>-fs NAMENODE -lsr /dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories recursively in relative path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir dir0</command>
+        <command>-fs NAMENODE -mkdir dir0/dir1</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir dir2</command>
+        <command>-fs NAMENODE -mkdir dir2/dir1</command>
+        <command>-fs NAMENODE -touchz dir2/file0</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup dir0 dir2</command>
+        <command>-fs NAMENODE -lsr dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/user/[a-z]*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: invalid option for group of file in absolute path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz /file1</command>
+        <command>-fs NAMENODE -chgrp % /file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm /file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chgrp: '%' does not match expected pattern for group</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>chgrp: invalid option for group of file in relative path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz file1</command>
+        <command>-fs NAMENODE -chgrp % file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chgrp: '%' does not match expected pattern for group</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
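+    <!-- Tests for chgrp with hdfs:// paths -->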
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -chgrp newgroup hdfs:///file1</command>
+        <command>-fs NAMENODE -ls hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of directory in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
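+    <!-- Without -R, only the target directory's group changes; entries beneath it keep
+         the default group (supergroup), as the comparators above expect. -->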
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of directory in hdfs:// path recursively </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of non existent file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chgrp newgroup hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chgrp: could not get status for 'hdfs:/file1': File does not exist: hdfs:/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple files in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -chgrp newgroup hdfs:///file* </command>
+        <command>-fs NAMENODE -lsr hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup hdfs:///dir*</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple files in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///file2</command>
+        <command>-fs NAMENODE -touchz hdfs:///file3</command>
+        <command>-fs NAMENODE -touchz hdfs:///file4</command>
+        <command>-fs NAMENODE -chgrp newgroup hdfs:///file1 hdfs:///file2 hdfs:///file3 hdfs:///file4 </command>
+        <command>-fs NAMENODE -lsr hdfs:///file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup hdfs:///dir0 hdfs:///dir2</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories recursively in hdfs:// path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup hdfs:///dir*</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories recursively in hdfs:// path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir0/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2</command>
+        <command>-fs NAMENODE -mkdir hdfs:///dir2/dir1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/file0</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz hdfs:///dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup hdfs:///dir0 hdfs:///dir2</command>
+        <command>-fs NAMENODE -lsr hdfs:///dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: invalid option for group of file in hdfs:// path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file1</command>
+        <command>-fs NAMENODE -chgrp % hdfs:///file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm hdfs:///file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chgrp: '%' does not match expected pattern for group</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <!-- Tests for chgrp with Namenode's path -->
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -chgrp newgroup NAMENODE/file1</command>
+        <command>-fs NAMENODE -ls NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Found 1 items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of directory in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of directory in Namenode's path recursively </description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir0</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of non existent file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -chgrp newgroup NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chgrp: could not get status for 'hdfs://localhost[.a-z]*:[0-9]+/file1': File does not exist: hdfs://localhost[.a-z]*:[0-9]+/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple files in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -chgrp newgroup NAMENODE/file* </command>
+        <command>-fs NAMENODE -lsr NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup NAMENODE/dir*</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple files in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file2</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file3</command>
+        <command>-fs NAMENODE -touchz NAMENODE/file4</command>
+        <command>-fs NAMENODE -chgrp newgroup NAMENODE/file1 NAMENODE/file2 NAMENODE/file3 NAMENODE/file4 </command>
+        <command>-fs NAMENODE -lsr NAMENODE/file*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file3</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/file4</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp newgroup NAMENODE/dir0 NAMENODE/dir2</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories recursively in Namenode's path using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup NAMENODE/dir*</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: change group of multiple directories recursively in Namenode's path without globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir0/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir0/dir1/file2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2</command>
+        <command>-fs NAMENODE -mkdir NAMENODE/dir2/dir1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/file0</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file1</command>
+        <command>-fs NAMENODE -touchz NAMENODE/dir2/dir1/file2</command>
+        <command>-fs NAMENODE -chgrp -R newgroup NAMENODE/dir0 NAMENODE/dir2</command>
+        <command>-fs NAMENODE -lsr NAMENODE/dir*</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/dir*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir0/file0</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^drwxr-xr-x( )*-( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/dir1/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*[a-z]*( )*newgroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/dir2/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
+    <test> <!-- TESTED -->
+      <description>chgrp: invalid option for group of file in Namenode's path</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file1</command>
+        <command>-fs NAMENODE -chgrp % NAMENODE/file1</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm NAMENODE/file1</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^chgrp: '%' does not match expected pattern for group</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
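+    <!-- Tests for help -->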
+    <test> <!-- TESTED -->
+      <description>help: help for ls</description>
+      <test-commands>
+        <command>-fs NAMENODE -help ls</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-ls &lt;path&gt;:( |\t)*List the contents that match the specified file pattern. If( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*path is not specified, the contents of /user/&lt;currentUser&gt;( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*will be listed. Directory entries are of the form( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*dirName \(full path\) &lt;dir&gt;( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*and file entries are of the form( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*fileName\(full path\) &lt;r n&gt; size( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*where n is the number of replicas specified for the file( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*and size is the size of the file, in bytes.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for lsr</description>
+      <test-commands>
+        <command>-fs NAMENODE -help lsr</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-lsr &lt;path&gt;:( |\t)*Recursively list the contents that match the specified( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*file pattern.( |\t)*Behaves very similarly to hadoop fs -ls,( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*except that the data is shown for all the entries in the( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*subtree.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for get</description>
+      <test-commands>
+        <command>-fs NAMENODE -help get</command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-get( )*\[-ignoreCrc\]( )*\[-crc\]( )*&lt;src&gt; &lt;localdst&gt;:( |\t)*Copy files that match the file pattern &lt;src&gt;( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*to the local name.( )*&lt;src&gt; is kept.( )*When copying mutiple,( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*files, the destination must be a directory.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for du</description>
+      <test-commands>
+        <command>-fs NAMENODE -help du</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-du &lt;path&gt;:( |\t)*Show the amount of space, in bytes, used by the files that( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*match the specified file pattern.( )*Equivalent to the unix( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*command "du -sb &lt;path&gt;/\*" in case of a directory,( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*and to "du -b &lt;path&gt;" in case of a file.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*The output is in the form( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*name\(full path\) size \(in bytes\)( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for dus</description>
+      <test-commands>
+        <command>-fs NAMENODE -help dus</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-dus &lt;path&gt;:( |\t)*Show the amount of space, in bytes, used by the files that( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*match the specified file pattern.  Equivalent to the unix( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*command "du -sb"  The output is in the form( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*name\(full path\) size \(in bytes\)( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for count</description>
+      <test-commands>
+        <command>-fs NAMENODE -help count</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-count\[-q\] &lt;path&gt;: Count the number of directories, files and bytes under the paths( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*that match the specified file pattern.  The output columns are:( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME or( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*QUOTA REMAINING_QUATA SPACE_QUOTA REMAINING_SPACE_QUOTA( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*DIR_COUNT FILE_COUNT CONTENT_SIZE FILE_NAME( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for mv</description>
+      <test-commands>
+        <command>-fs NAMENODE -help mv</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-mv &lt;src&gt; &lt;dst&gt;:( |\t)*Move files that match the specified file pattern &lt;src&gt;( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*to a destination &lt;dst&gt;.  When moving multiple files, the( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*destination must be a directory.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for cp</description>
+      <test-commands>
+        <command>-fs NAMENODE -help cp</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-cp &lt;src&gt; &lt;dst&gt;:( |\t)*Copy files that match the file pattern &lt;src&gt; to a( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*destination.  When copying multiple files, the destination( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*must be a directory.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for rm</description>
+      <test-commands>
+        <command>-fs NAMENODE -help rm</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rm &lt;src&gt;:( |\t)*Delete all files that match the specified file pattern.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Equivlent to the Unix command "rm &lt;src&gt;"( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for rmr</description>
+      <test-commands>
+        <command>-fs NAMENODE -help rmr</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rmr &lt;src&gt;:( |\t)*Remove all directories which match the specified file( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*pattern. Equivlent to the Unix command "rm -rf &lt;src&gt;"( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for put</description>
+      <test-commands>
+        <command>-fs NAMENODE -help put</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-put &lt;localsrc&gt; ... &lt;dst&gt;:( |\t)*Copy files from the local file system( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*into fs.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for copyFromLocal</description>
+      <test-commands>
+        <command>-fs NAMENODE -help copyFromLocal</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-copyFromLocal &lt;localsrc&gt; ... &lt;dst&gt;:( )*Identical to the -put command.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for moveFromLocal</description>
+      <test-commands>
+        <command>-fs NAMENODE -help moveFromLocal</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-moveFromLocal &lt;localsrc&gt; ... &lt;dst&gt;: Same as -put, except that the source is( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*deleted after it's copied.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for getmerge</description>
+      <test-commands>
+        <command>-fs NAMENODE -help getmerge</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-getmerge &lt;src&gt; &lt;localdst&gt;:  Get all the files in the directories that( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*match the source file pattern and merge and sort them to only( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*one file on local fs. &lt;src&gt; is kept.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for cat</description>
+      <test-commands>
+        <command>-fs NAMENODE -help cat</command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-cat &lt;src&gt;:( |\t)*Fetch all files that match the file pattern &lt;src&gt;( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*and display their content on stdout.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for copyToLocal</description>
+      <test-commands>
+        <command>-fs NAMENODE -help copyToLocal</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-copyToLocal \[-ignoreCrc\] \[-crc\] &lt;src&gt; &lt;localdst&gt;:( )*Identical to the -get command.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for moveToLocal</description>
+      <test-commands>
+        <command>-fs NAMENODE -help moveToLocal</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-moveToLocal &lt;src&gt; &lt;localdst&gt;:( )*Not implemented yet( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for mkdir</description>
+      <test-commands>
+        <command>-fs NAMENODE -help mkdir</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-mkdir &lt;path&gt;:( |\t)*Create a directory in specified location.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for setrep</description>
+      <test-commands>
+        <command>-fs NAMENODE -help setrep</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-setrep \[-R\] \[-w\] &lt;rep&gt; &lt;path/file&gt;:( )*Set the replication level of a file.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*The -R flag requests a recursive change of replication level( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*for an entire tree.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for touchz</description>
+      <test-commands>
+        <command>-fs NAMENODE -help touchz</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-touchz &lt;path&gt;: Write a timestamp in yyyy-MM-dd HH:mm:ss format( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*in a file at &lt;path&gt;. An error is returned if the file exists with non-zero length( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for test</description>
+      <test-commands>
+        <command>-fs NAMENODE -help test</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-test -\[ezd\] &lt;path&gt;: If file \{ exists, has zero length, is a directory( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*then return 0, else return 1.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for stat</description>
+      <test-commands>
+        <command>-fs NAMENODE -help stat</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-stat \[format\] &lt;path&gt;: Print statistics about the file/directory at &lt;path&gt;( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*in the specified format. Format accepts filesize in blocks \(%b\), filename \(%n\),( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*block size \(%o\), replication \(%r\), modification date \(%y, %Y\)( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for tail</description>
+      <test-commands>
+        <command>-fs NAMENODE -help tail</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-tail \[-f\] &lt;file&gt;:  Show the last 1KB of the file.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*The -f option shows apended data as the file grows.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for chmod</description>
+      <test-commands>
+        <command>-fs NAMENODE -help chmod</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-chmod \[-R\] &lt;MODE\[,MODE\]... \| OCTALMODE&gt; PATH...( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Changes permissions of a file.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*This works similar to shell's chmod with a few exceptions.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*-R( |\t)*modifies the files recursively. This is the only option( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*currently supported.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*MODE( |\t)*Mode is same as mode used for chmod shell command.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Only letters recognized are 'rwxXt'. E.g. \+t,a\+r,g-w,\+rwx,o=r( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*OCTALMODE Mode specifed in 3 or 4 digits. If 4 digits, the first may( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*be 1 or 0 to turn the sticky bit on or off, respectively.( )*Unlike( |\t)*shell command, it is not possible to specify only part of the mode( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*E.g. 754 is same as u=rwx,g=rx,o=r( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*If none of 'augo' is specified, 'a' is assumed and unlike( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*shell command, no umask is applied.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for chown</description>
+      <test-commands>
+        <command>-fs NAMENODE -help chown</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-chown \[-R\] \[OWNER\]\[:\[GROUP\]\] PATH...( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Changes owner and group of a file.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*This is similar to shell's chown with a few exceptions.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*-R( |\t)*modifies the files recursively. This is the only option( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*currently supported.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*If only owner or group is specified then only owner or( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*group is modified.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*The owner and group names may only cosists of digits, alphabet,( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*and any of '-_.@/' i.e. \[-_.@/a-zA-Z0-9\]. The names are case( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*sensitive.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*WARNING: Avoid using '.' to separate user name and group though( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Linux allows it. If user names have dots in them and you are( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*using local file system, you might see surprising results since( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*shell command 'chown' is used for local files.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for chgrp</description>
+      <test-commands>
+        <command>-fs NAMENODE -help chgrp</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-chgrp \[-R\] GROUP PATH...( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*This is equivalent to -chown ... :GROUP ...( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>help: help for help</description>
+      <test-commands>
+        <command>-fs NAMENODE -help help</command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-help \[cmd\]:( |\t)*Displays help for given command or all commands if none( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*is specified.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
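+    <!-- Help tests for the dfsadmin commands -->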
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin report</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help report</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-report:( |\t)*Reports basic filesystem information and statistics.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin safemode</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help safemode</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-safemode &lt;enter\|leave\|get\|wait&gt;:( |\t)*Safe mode maintenance command.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Safe mode is a Namenode state in which it( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*1.( )*does not accept changes to the name space \(read-only\)( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*2.( )*does not replicate or delete blocks.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Safe mode is entered automatically at Namenode startup, and( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*leaves safe mode automatically when the configured minimum( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*percentage of blocks satisfies the minimum replication( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*condition.  Safe mode can also be entered manually, but then( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*it can only be turned off manually as well.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin refreshNodes</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help refreshNodes</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-refreshNodes:( |\t)*Updates the set of hosts allowed to connect to namenode.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Re-reads the config file to update values defined by( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*dfs.hosts and dfs.host.exclude and reads the( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*entires \(hostnames\) in those files.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Each entry not defined in dfs.hosts but in( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*dfs.hosts.exclude is decommissioned. Each entry defined( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*in dfs.hosts and also in dfs.host.exclude is stopped from( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*decommissioning if it has aleady been marked for decommission.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Entires not present in both the lists are decommissioned.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin finalizeUpgrade</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help finalizeUpgrade</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-finalizeUpgrade:( )*Finalize upgrade of HDFS.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Datanodes delete their previous version working directories,( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*followed by Namenode doing the same.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*This completes the upgrade process.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin upgradeProgress</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help upgradeProgress</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-upgradeProgress &lt;status\|details\|force&gt;:( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*request current distributed upgrade status,( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*a detailed status or force the upgrade to proceed.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin metasave</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help metasave</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-metasave &lt;filename&gt;:( |\t)*Save Namenode's primary data structures( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*to &lt;filename&gt; in the directory specified by hadoop.log.dir property.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*&lt;filename&gt; will contain one line for each of the following( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*1. Datanodes heart beating with Namenode( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*2. Blocks waiting to be replicated( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*3. Blocks currrently being replicated( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*4. Blocks waiting to be deleted( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin setQuota</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help setQuota</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-setQuota &lt;quota&gt; &lt;dirname&gt;...&lt;dirname&gt;: Set the quota &lt;quota&gt; for each directory &lt;dirName&gt;.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*The directory quota is a long integer that puts a hard limit( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*on the number of names in the directory tree( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Best effort for the directory, with faults reported if( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*1. N is not a positive integer, or( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*2. user is not an administrator, or( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*3. the directory does not exist or is a file, or( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*4. the directory would immediately exceed the new quota.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin clrQuota</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help clrQuota</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-clrQuota &lt;dirname&gt;...&lt;dirname&gt;: Clear the quota for each directory &lt;dirName&gt;.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Best effort for the directory. with fault reported if( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*1. the directory does not exist or is a file, or( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*2. user is not an administrator.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*It does not fault if the directory has no quota.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin setSpaceQuota</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help setSpaceQuota</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-setSpaceQuota &lt;quota&gt; &lt;dirname&gt;...&lt;dirname&gt;: Set the disk space quota &lt;quota&gt; for each directory &lt;dirName&gt;.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*The directory quota is a long integer that puts a hard limit( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*on the number of names in the directory tree.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*2. user is not an administrator.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*It does not fault if the directory has no quota.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin clrSpaceQuota</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help clrSpaceQuota</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-clrSpaceQuota &lt;dirname&gt;...&lt;dirname&gt;: Clear the disk space quota for each directory &lt;dirName&gt;.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Best effort for the directory. with fault reported if( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*1. the directory does not exist or is a file, or( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*2. user is not an administrator.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*It does not fault if the directory has no quota.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin refreshServiceAcl</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help refreshServiceAcl</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-refreshServiceAcl: Reload the service-level authorization policy file( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Namenode will reload the authorization policy file.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>help: help for dfsadmin help</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -help help</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-help \[cmd\]:( |\t)*Displays help for the given command or all commands if none( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*is specified.( )*</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+ 
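+    <!-- Tests for error messages from the quota commands -->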
+    <test> <!--Tested -->
+      <description>verifying error messages for quota commands - setting quota on a file</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /test </command>
+        <command>-fs NAMENODE -touchz /test/file1 </command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 1 /test/file1 </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -setQuota 5 /test </dfs-admin-command>
+        <!-- Same directory will be used in the next test -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Cannot set quota on a file: /test/file1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!--Tested -->
+      <description>verifying error messages for quota commands - setting space quota on a non-existent path</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1g /test1 </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- Same directory will be used in the next test -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Can not find listing for /test1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!--Tested -->
+      <description>verifying error messages for quota commands - exceeding quota</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -setQuota 3 /test </dfs-admin-command>
+        <command>-fs NAMENODE -touchz /test/file0 </command>
+        <command>-fs NAMENODE -mkdir /test/test1 </command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- Same directory (/test) will be used in the next test -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>The quota of /test is exceeded</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>verifying error messages for quota commands - setting an invalid quota</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -setQuota 0 /test </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- Same directory will be used in the next test -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Invalid values for quota : 0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!--Tested -->
+      <description>verifying error messages for quota commands - setting an invalid space quota</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota a5 /test </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- Same directory will be used in the next test -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>For input string: "a5"</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!--Tested -->
+      <description>verifying error messages for quota commands - clrQuota on a non-existent path</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -clrQuota /test1 </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /test </command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Can not find listing for /test1</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <test> <!--Tested -->
+      <description>verifying error messages for quota commands - using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir3</command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 1k /dir* </dfs-admin-command>
+        <command>-fs NAMENODE -put CLITEST_DATA/data15bytes /dir1/</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir* </command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>put:( )*org.apache.hadoop.hdfs.protocol.QuotaExceededException:( )*The( )*quota( )*of( )*/dir1( )*is( )*exceeded:( )*namespace( )*quota=-1( )*file( )*count=[0-9],( )*diskspace( )*quota=1024( )*diskspace=[0-9]+</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!--Tested -->
+      <description>verifying error messages for quota commands - setting quota using globbing</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir /dir1</command>
+        <command>-fs NAMENODE -mkdir /dir2</command>
+        <command>-fs NAMENODE -mkdir /dir3</command>
+        <dfs-admin-command>-fs NAMENODE -setQuota 1 /dir* </dfs-admin-command>
+        <command>-fs NAMENODE -mkdir /dir1/dir4</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /dir* </command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>mkdir: org.apache.hadoop.hdfs.protocol.QuotaExceededException: The quota of /dir1 is exceeded: namespace quota=1 file count=2, diskspace quota=-1 diskspace=0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
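+    <!-- Test for refreshServiceAcl -->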
+    <test> <!--Tested -->
+      <description>refreshServiceAcl: refreshing security authorization policy for namenode</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -refreshServiceAcl </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <test> <!--Tested -->
+      <description>refreshServiceAcl: verifying error message while refreshing security authorization policy for namenode</description>
+      <test-commands>
+        <!-- hadoop-policy.xml for tests has 
+             security.refresh.policy.protocol.acl = ${user.name} -->
+        <dfs-admin-command>-fs NAMENODE -Dhadoop.job.ugi=blah,blah -refreshServiceAcl </dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>access denied</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+   
+    <!-- Test for safemode -->
+    <test> <!-- TESTED -->
+      <description>safemode: Test for enter - Namenode is not in safemode</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode enter</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave</dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Safe mode is ON</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>safemode: Test for enter - Namenode is already in safemode</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode enter</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -safemode enter</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave</dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Safe mode is ON</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>safemode: Test for leave - Namenode is already in safemode</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode enter</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -safemode leave</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Safe mode is OFF</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>safemode: Test for leave - Namenode is not in safemode</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Safe mode is OFF</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>safemode: Test for get - Namenode is not in safemode</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -safemode get</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Safe mode is OFF</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>safemode: Test for get - Namenode is already in safemode</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode enter</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -safemode get</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave</dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Safe mode is ON</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>safemode: Test for wait - Namenode is not in safemode</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -safemode wait</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Safe mode is OFF</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>safemode: Test for wait - Namenode is already in safemode</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode enter</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -safemode wait &amp;</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -safemode leave</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Safe mode is OFF</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Test for report -->
+    <test> <!-- TESTED -->
+      <description>report: Displays the report about the Datanodes</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -report</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- no cleanup cmd -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Configured Capacity: [0-9]+ \([0-9]+\.[0-9]+ [BKMGT]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Present Capacity: [0-9]+ \([0-9\.]+ [BKMGT]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>DFS Remaining: [0-9]+ \([0-9\.]+ [BKMGT]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>DFS Used: [0-9]+ \([0-9\.]+ [BKMGT]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>DFS Used\%: [0-9\.]+%</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Datanodes available: [0-9]+ \([0-9]+ total, [0-9]+ dead\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Name: [0-9\.:]+ \([a-zA-Z0-9\.]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Decommission Status : [a-zA-Z]+</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Non DFS Used: [0-9]+ \([0-9\.]+ [BKMGT]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>DFS Remaining%: [0-9\.]+%</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Last contact: [a-zA-Z]+ [a-zA-Z]+ [0-9]+ [0-9:]+ [A-Z\-\+\:0-9]+ [0-9]+</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Live datanodes:</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Test for saveNamespace -->
+    <test> <!-- TESTED -->
+      <description>saveNamespace: to save the namespace when safemode is ON</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode enter</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -saveNamespace</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave</dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>saveNamespace: to save the namespace when safemode is OFF</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -saveNamespace</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <dfs-admin-command>-fs NAMENODE -safemode leave</dfs-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>saveNamespace: java.io.IOException: Safe mode should be turned ON in order to create namespace image.</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Test for refreshNodes -->
+    <test> <!-- TESTED -->
+      <description>refreshNodes: to refresh the nodes</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -refreshNodes</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -report</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- no cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Configured Capacity: [0-9]+ \([0-9]+\.[0-9]+ [BKMGT]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Present Capacity: [0-9]+ \([0-9\.]+ [BKMGT]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>DFS Remaining: [0-9]+ \([0-9\.]+ [BKMGT]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>DFS Used: [0-9]+ \([0-9\.]+ [BKMGT]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>DFS Used\%: [0-9\.]+%</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Datanodes available: [0-9]+ \([0-9]+ total, [0-9]+ dead\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Name: [0-9\.:]+ \([a-zA-Z0-9\.]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Decommission Status : [a-zA-Z]+</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Non DFS Used: [0-9]+ \([0-9\.]+ [BKMGT]+\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>DFS Remaining%: [0-9\.]+%</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Last contact: [a-zA-Z]+ [a-zA-Z]+ [0-9]+ [0-9:]+ [A-Z\-\+\:0-9]+ [0-9]+</expected-output>
+        </comparator>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>Live datanodes:</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Test for metasave -->
+    <test> <!-- TESTED -->
+      <description>metasave: to save metadata</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -metasave metafile</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Created file metafile on server hdfs:\/\/[a-zA-Z0-9\.:]+</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+    
+    <!-- Test for clrSpaceQuota -->
+    <test> <!-- TESTED -->
+      <description>clrSpaceQuota: clearing the space quota set on a directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir test</command>
+        <dfs-admin-command>-fs NAMENODE -setSpaceQuota 15 test</dfs-admin-command>
+        <dfs-admin-command>-fs NAMENODE -clrSpaceQuota test</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr test</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>ExactComparator</type>
+          <expected-output></expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>printTopology: verifying that the topology map is what we expect</description>
+      <test-commands>
+        <dfs-admin-command>-fs NAMENODE -printTopology</dfs-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <!-- No cleanup -->
+      </cleanup-commands>
+      <comparators>
+        <!-- miniDFS cluster started in TestCLI is set to match this output -->
+        <comparator>
+          <type>RegexpAcrossOutputComparator</type>
+          <expected-output>^Rack: \/rack1\s*127\.0\.0\.1:\d+\s\(localhost.*\)\s*127\.0\.0\.1:\d+\s\(localhost.*\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpAcrossOutputComparator</type>
+          <expected-output>Rack: \/rack2\s*127\.0\.0\.1:\d+\s\(localhost.*\)\s*127\.0\.0\.1:\d+\s\(localhost.*\)\s*127\.0\.0\.1:\d+\s\(localhost.*\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpAcrossOutputComparator</type>
+          <expected-output>Rack: \/rack3\s*127\.0\.0\.1:\d+\s\(localhost.*\)</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpAcrossOutputComparator</type>
+          <expected-output>Rack: \/rack4\s*127\.0\.0\.1:\d+\s\(localhost.*\)\s*127\.0\.0\.1:\d+\s\(localhost.*\)</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <!-- Tests for moveFromLocal -->
+    <test> <!-- TESTED -->
+      <description>moveFromLocal: moving a non-existent file (absolute path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -moveFromLocal /user/wrongdata file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>moveFromLocal: File /user/wrongdata does not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>moveFromLocal: moving a non-existent file (relative path)</description>
+      <test-commands>
+        <command>-fs NAMENODE -moveFromLocal wrongdata file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>moveFromLocal: File wrongdata does not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>moveFromLocal: moving many files into an existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes /data15bytes</command>
+        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data30bytes /data30bytes</command>
+        <command>-fs NAMENODE -touchz file0</command>
+        <command>-fs NAMENODE -moveFromLocal /data15bytes /data30bytes file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^moveFromLocal: copying multiple files, but last argument `file0' is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>moveFromLocal: moving many files into a non-existent directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes /data15bytes</command>
+        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data30bytes /data30bytes</command>
+        <command>-fs NAMENODE -moveFromLocal /data15bytes /data30bytes wrongdir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^moveFromLocal: `wrongdir': specified destination directory doest not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>moveFromLocal: Test for hdfs:// path - moving a non-existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -moveFromLocal /user/wrongdata hdfs:///file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>moveFromLocal: File /user/wrongdata does not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>moveFromLocal: Test for hdfs:// path - moving many files into an existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz hdfs:///file0</command>
+        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr hdfs:///*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^moveFromLocal: copying multiple files, but last argument `hdfs:/file0' is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>moveFromLocal: Test for hdfs:// path - moving many files into a non-existent directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///wrongdir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^moveFromLocal: `hdfs:/wrongdir': specified destination directory doest not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>moveFromLocal: Test for Namenode's path - moving a non-existent file</description>
+      <test-commands>
+        <command>-fs NAMENODE -moveFromLocal /user/wrongdata NAMENODE/file</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>TokenComparator</type>
+          <expected-output>moveFromLocal: File /user/wrongdata does not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>moveFromLocal: Test for Namenode's path - moving many files into an existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -touchz NAMENODE/file0</command>
+        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr NAMENODE/*</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^moveFromLocal: copying multiple files, but last argument `hdfs://localhost[.a-z]*:[0-9]+/file0' is not a directory</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
+      <description>moveFromLocal: Test for Namenode's path - moving many files into a non-existent directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/wrongdir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmr /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^moveFromLocal: `hdfs://localhost[.a-z]*:[0-9]+/wrongdir': specified destination directory doest not exist</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+  </tests>
+</configuration>

+ 431 - 0
src/test/org/apache/hadoop/fs/TestGlobPaths.java

@@ -0,0 +1,431 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.IOException;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+
+import junit.framework.TestCase;
+
+public class TestGlobPaths extends TestCase {
+  
+  static class RegexPathFilter implements PathFilter {
+    
+    private final String regex;
+    public RegexPathFilter(String regex) {
+      this.regex = regex;
+    }
+
+    public boolean accept(Path path) {
+      return path.toString().matches(regex);
+    }
+
+  }
+  
+  static private MiniDFSCluster dfsCluster;
+  static private FileSystem fs;
+  static final private int NUM_OF_PATHS = 4;
+  static final String USER_DIR = "/user/"+System.getProperty("user.name");
+  private Path[] path = new Path[NUM_OF_PATHS];
+  
+  protected void setUp() throws Exception {
+    try {
+      Configuration conf = new Configuration();
+      dfsCluster = new MiniDFSCluster(conf, 1, true, null);
+      fs = FileSystem.get(conf);
+    } catch (IOException e) {
+      e.printStackTrace();
+    }
+  }
+  
+  protected void tearDown() throws Exception {
+    if(dfsCluster!=null) {
+      dfsCluster.shutdown();
+    }
+  }
+  
+  public void testPathFilter() throws IOException {
+    try {
+      String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b" };
+      Path[] matchedPath = prepareTesting(USER_DIR + "/*/*", files,
+          new RegexPathFilter("^.*" + Pattern.quote(USER_DIR) + "/a/b"));
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[1]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  public void testPathFilterWithFixedLastComponent() throws IOException {
+    try {
+      String[] files = new String[] { USER_DIR + "/a", USER_DIR + "/a/b",
+                                      USER_DIR + "/c", USER_DIR + "/c/b", };
+      Path[] matchedPath = prepareTesting(USER_DIR + "/*/b", files,
+          new RegexPathFilter("^.*" + Pattern.quote(USER_DIR) + "/a/b"));
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[1]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  public void testGlob() throws Exception {
+    //pTestEscape(); // need to wait until HADOOP-1995 is fixed
+    pTestJavaRegexSpecialChars();
+    pTestCurlyBracket();
+    pTestLiteral();
+    pTestAny();
+    pTestClosure();
+    pTestSet();
+    pTestRange();
+    pTestSetExcl();
+    pTestCombination();
+    pTestRelativePath();
+  }
+  
+  private void pTestLiteral() throws IOException {
+    try {
+      String [] files = new String[] {USER_DIR+"/a2c", USER_DIR+"/abc.d"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/abc.d", files);
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[1]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  private void pTestEscape() throws IOException {
+    try {
+      String [] files = new String[] {USER_DIR+"/ab\\[c.d"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/ab\\[c.d", files);
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[0]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  private void pTestAny() throws IOException {
+    try {
+      String [] files = new String[] { USER_DIR+"/abc", USER_DIR+"/a2c",
+                                       USER_DIR+"/a.c", USER_DIR+"/abcd"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/a?c", files);
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[2]);
+      assertEquals(matchedPath[1], path[1]);
+      assertEquals(matchedPath[2], path[0]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  private void pTestClosure() throws IOException {
+    pTestClosure1();
+    pTestClosure2();
+    pTestClosure3();
+    pTestClosure4();
+    pTestClosure5();
+  }
+  
+  private void pTestClosure1() throws IOException {
+    try {
+      String [] files = new String[] {USER_DIR+"/a", USER_DIR+"/abc",
+                                      USER_DIR+"/abc.p", USER_DIR+"/bacd"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/a*", files);
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[1]);
+      assertEquals(matchedPath[2], path[2]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  private void pTestClosure2() throws IOException {
+    try {
+      String [] files = new String[] {USER_DIR+"/a.", USER_DIR+"/a.txt",
+                                     USER_DIR+"/a.old.java", USER_DIR+"/.java"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/a.*", files);
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[2]);
+      assertEquals(matchedPath[2], path[1]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  private void pTestClosure3() throws IOException {
+    try {    
+      String [] files = new String[] {USER_DIR+"/a.txt.x", USER_DIR+"/ax",
+                                      USER_DIR+"/ab37x", USER_DIR+"/bacd"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/a*x", files);
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[2]);
+      assertEquals(matchedPath[2], path[1]);
+    } finally {
+      cleanupDFS();
+    } 
+  }
+
+  private void pTestClosure4() throws IOException {
+    try {
+      String [] files = new String[] {USER_DIR+"/dir1/file1", 
+                                      USER_DIR+"/dir2/file2", 
+                                       USER_DIR+"/dir3/file1"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/*/file1", files);
+      assertEquals(matchedPath.length, 2);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[2]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  private void pTestClosure5() throws IOException {
+    try {
+      String [] files = new String[] {USER_DIR+"/dir1/file1", 
+                                      USER_DIR+"/file1"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/*/file1", files);
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[0]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+
+  private void pTestSet() throws IOException {
+    try {    
+      String [] files = new String[] {USER_DIR+"/a.c", USER_DIR+"/a.cpp",
+                                      USER_DIR+"/a.hlp", USER_DIR+"/a.hxy"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/a.[ch]??", files);
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[1]);
+      assertEquals(matchedPath[1], path[2]);
+      assertEquals(matchedPath[2], path[3]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  private void pTestRange() throws IOException {
+    try {    
+      String [] files = new String[] {USER_DIR+"/a.d", USER_DIR+"/a.e",
+                                      USER_DIR+"/a.f", USER_DIR+"/a.h"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/a.[d-fm]", files);
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[1]);
+      assertEquals(matchedPath[2], path[2]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  private void pTestSetExcl() throws IOException {
+    try {    
+      String [] files = new String[] {USER_DIR+"/a.d", USER_DIR+"/a.e",
+                                      USER_DIR+"/a.0", USER_DIR+"/a.h"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/a.[^a-cg-z0-9]", files);
+      assertEquals(matchedPath.length, 2);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[1]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+
+  private void pTestCombination() throws IOException {
+    try {    
+      String [] files = new String[] {"/user/aa/a.c", "/user/bb/a.cpp",
+                                      "/user1/cc/b.hlp", "/user/dd/a.hxy"};
+      Path[] matchedPath = prepareTesting("/use?/*/a.[ch]{lp,xy}", files);
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[3]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  private void pTestRelativePath() throws IOException {
+    try {
+      String [] files = new String[] {"a", "abc", "abc.p", "bacd"};
+      Path[] matchedPath = prepareTesting("a*", files);
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], new Path(USER_DIR, path[0]));
+      assertEquals(matchedPath[1], new Path(USER_DIR, path[1]));
+      assertEquals(matchedPath[2], new Path(USER_DIR, path[2]));
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  /* Test {xx,yy} */
+  private void pTestCurlyBracket() throws IOException {
+    Path[] matchedPath;
+    String [] files;
+    try {
+      files = new String[] { USER_DIR+"/a.abcxx", USER_DIR+"/a.abxy",
+                             USER_DIR+"/a.hlp", USER_DIR+"/a.jhyy"};
+      matchedPath = prepareTesting(USER_DIR+"/a.{abc,jh}??", files);
+      assertEquals(matchedPath.length, 2);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[3]);
+    } finally {
+      cleanupDFS();
+    }
+    // nested curlies
+    try {
+      files = new String[] { USER_DIR+"/a.abcxx", USER_DIR+"/a.abdxy",
+                             USER_DIR+"/a.hlp", USER_DIR+"/a.jhyy" };
+      matchedPath = prepareTesting(USER_DIR+"/a.{ab{c,d},jh}??", files);
+      assertEquals(matchedPath.length, 3);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[1]);
+      assertEquals(matchedPath[2], path[3]);
+    } finally {
+      cleanupDFS();
+    }
+    // cross-component curlies
+    try {
+      files = new String[] { USER_DIR+"/a/b", USER_DIR+"/a/d",
+                             USER_DIR+"/c/b", USER_DIR+"/c/d" };
+      matchedPath = prepareTesting(USER_DIR+"/{a/b,c/d}", files);
+      assertEquals(matchedPath.length, 2);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[3]);
+    } finally {
+      cleanupDFS();
+    }
+    // cross-component absolute curlies
+    try {
+      files = new String[] { "/a/b", "/a/d",
+                             "/c/b", "/c/d" };
+      matchedPath = prepareTesting("{/a/b,/c/d}", files);
+      assertEquals(matchedPath.length, 2);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[3]);
+    } finally {
+      cleanupDFS();
+    }
+    try {
+      // test standalone }
+      files = new String[] {USER_DIR+"/}bc", USER_DIR+"/}c"};
+      matchedPath = prepareTesting(USER_DIR+"/}{a,b}c", files);
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[0]);
+      // test {b}
+      matchedPath = prepareTesting(USER_DIR+"/}{b}c", files);
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[0]);
+      // test {}
+      matchedPath = prepareTesting(USER_DIR+"/}{}bc", files);
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[0]);
+
+      // test {,}
+      matchedPath = prepareTesting(USER_DIR+"/}{,}bc", files);
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[0]);
+
+      // test {b,}
+      matchedPath = prepareTesting(USER_DIR+"/}{b,}c", files);
+      assertEquals(matchedPath.length, 2);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[1]);
+
+      // test {,b}
+      matchedPath = prepareTesting(USER_DIR+"/}{,b}c", files);
+      assertEquals(matchedPath.length, 2);
+      assertEquals(matchedPath[0], path[0]);
+      assertEquals(matchedPath[1], path[1]);
+
+      // test a combination of {} and ?
+      matchedPath = prepareTesting(USER_DIR+"/}{ac,?}", files);
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[1]);
+      
+      // test ill-formed curly
+      boolean hasException = false;
+      try {
+        prepareTesting(USER_DIR+"}{bc", files);
+      } catch (IOException e) {
+        assertTrue(e.getMessage().startsWith("Illegal file pattern:") );
+        hasException = true;
+      }
+      assertTrue(hasException);
+    } finally {
+      cleanupDFS();
+    }
+  }
+  
+  /* test that a path name can contain Java regex special characters */
+  private void pTestJavaRegexSpecialChars() throws IOException {
+    try {
+      String[] files = new String[] {USER_DIR+"/($.|+)bc", USER_DIR+"/abc"};
+      Path[] matchedPath = prepareTesting(USER_DIR+"/($.|+)*", files);
+      assertEquals(matchedPath.length, 1);
+      assertEquals(matchedPath[0], path[0]);
+    } finally {
+      cleanupDFS();
+    }
+  }
+
+  private Path[] prepareTesting(String pattern, String[] files)
+    throws IOException {
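+    // The test "files" are created as directories via mkdirs(); globbing
+    // matches path names regardless of whether the entries are files or dirs.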
+    for(int i=0; i<Math.min(NUM_OF_PATHS, files.length); i++) {
+      path[i] = new Path(files[i]).makeQualified(fs);
+      if (!fs.mkdirs(path[i])) {
+        throw new IOException("Mkdirs failed to create " + path[i].toString());
+      }
+    }
+    Path patternPath = new Path(pattern);
+    Path[] globResults = FileUtil.stat2Paths(fs.globStatus(patternPath),
+                                             patternPath);
+    for(int i=0; i<globResults.length; i++) {
+      globResults[i] = globResults[i].makeQualified(fs);
+    }
+    return globResults;
+  }
+  
+  private Path[] prepareTesting(String pattern, String[] files,
+      PathFilter filter) throws IOException {
+    for(int i=0; i<Math.min(NUM_OF_PATHS, files.length); i++) {
+      path[i] = new Path(files[i]).makeQualified(fs);
+      if (!fs.mkdirs(path[i])) {
+        throw new IOException("Mkdirs failed to create " + path[i].toString());
+      }
+    }
+    Path patternPath = new Path(pattern);
+    Path[] globResults = FileUtil.stat2Paths(fs.globStatus(patternPath, filter),
+                                             patternPath);
+    for(int i=0; i<globResults.length; i++) {
+      globResults[i] = globResults[i].makeQualified(fs);
+    }
+    return globResults;
+  }
+  
+  private void cleanupDFS() throws IOException {
+    fs.delete(new Path("/user"), true);
+  }
+  
+}

+ 155 - 0
src/test/org/apache/hadoop/fs/TestUrlStreamHandler.java

@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsUrlStreamHandlerFactory;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Test of the URL stream handler factory.
+ */
+public class TestUrlStreamHandler extends TestCase {
+
+  /**
+   * Test opening and reading from an InputStream through a hdfs:// URL.
+   * <p>
+   * First generate a file with some content through the FileSystem API, then
+   * try to open and read the file through the URL stream API.
+   * 
+   * @throws IOException
+   */
+  public void testDfsUrls() throws IOException {
+
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    FileSystem fs = cluster.getFileSystem();
+
+    // Set up our own URLStreamHandlerFactory.
+    // setURLStreamHandlerFactory can be called at most once per JVM, so the
+    // factory registered here remains in effect for all test cases in
+    // TestUrlStreamHandler.
+    FsUrlStreamHandlerFactory factory =
+        new org.apache.hadoop.fs.FsUrlStreamHandlerFactory();
+    java.net.URL.setURLStreamHandlerFactory(factory);
+
+    Path filePath = new Path("/thefile");
+
+    try {
+      byte[] fileContent = new byte[1024];
+      for (int i = 0; i < fileContent.length; ++i)
+        fileContent[i] = (byte) i;
+
+      // First create the file through the FileSystem API
+      OutputStream os = fs.create(filePath);
+      os.write(fileContent);
+      os.close();
+
+      // Second, open and read the file content through the URL API
+      URI uri = fs.getUri();
+      URL fileURL =
+          new URL(uri.getScheme(), uri.getHost(), uri.getPort(), filePath
+              .toString());
+
+      InputStream is = fileURL.openStream();
+      assertNotNull(is);
+
+      byte[] bytes = new byte[4096];
+      assertEquals(1024, is.read(bytes));
+      is.close();
+
+      for (int i = 0; i < fileContent.length; ++i)
+        assertEquals(fileContent[i], bytes[i]);
+
+      // Cleanup: delete the file
+      fs.delete(filePath, false);
+
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+
+  }
+
+  /**
+   * Test opening and reading from an InputStream through a file:// URL.
+   * 
+   * @throws IOException
+   * @throws URISyntaxException
+   */
+  public void testFileUrls() throws IOException, URISyntaxException {
+    // URLStreamHandler is already set in JVM by testDfsUrls() 
+    Configuration conf = new Configuration();
+
+    // Locate the test temporary directory.
+    File tmpDir = new File(conf.get("hadoop.tmp.dir"));
+    if (!tmpDir.exists()) {
+      if (!tmpDir.mkdirs())
+        throw new IOException("Cannot create temporary directory: " + tmpDir);
+    }
+
+    File tmpFile = new File(tmpDir, "thefile");
+    URI uri = tmpFile.toURI();
+
+    FileSystem fs = FileSystem.get(uri, conf);
+
+    try {
+      byte[] fileContent = new byte[1024];
+      for (int i = 0; i < fileContent.length; ++i)
+        fileContent[i] = (byte) i;
+
+      // First create the file through the FileSystem API
+      OutputStream os = fs.create(new Path(uri.getPath()));
+      os.write(fileContent);
+      os.close();
+
+      // Second, open and read the file content through the URL API.
+      URL fileURL = uri.toURL();
+
+      InputStream is = fileURL.openStream();
+      assertNotNull(is);
+
+      byte[] bytes = new byte[4096];
+      assertEquals(1024, is.read(bytes));
+      is.close();
+
+      for (int i = 0; i < fileContent.length; ++i)
+        assertEquals(fileContent[i], bytes[i]);
+
+      // Cleanup: delete the file
+      fs.delete(new Path(uri.getPath()), false);
+
+    } finally {
+      fs.close();
+    }
+
+  }
+
+}

+ 155 - 0
src/test/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java

@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.ftp;
+
+import java.net.URI;
+import junit.framework.TestCase;
+
+import org.apache.ftpserver.DefaultFtpServerContext;
+import org.apache.ftpserver.FtpServer;
+import org.apache.ftpserver.ftplet.Authority;
+import org.apache.ftpserver.ftplet.UserManager;
+import org.apache.ftpserver.listener.mina.MinaListener;
+import org.apache.ftpserver.usermanager.BaseUser;
+import org.apache.ftpserver.usermanager.WritePermission;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * Generates a bunch of random files and directories using class 'DFSTestUtil',
+ * stores them on the FTP file system, copies them back, and checks that all
+ * the files were retrieved successfully without any data corruption.
+ */
+public class TestFTPFileSystem extends TestCase {
+
+  private Configuration defaultConf = new Configuration();
+  private FtpServer server = null;
+  private FileSystem localFs = null;
+  private FileSystem ftpFs = null;
+
+  private Path workDir = new Path(new Path(System.getProperty(
+      "test.build.data", "."), "data"), "TestFTPFileSystem");
+
+  Path ftpServerRoot = new Path(workDir, "FTPServer");
+  Path ftpServerConfig = null;
+
+  private void startServer() {
+    try {
+      DefaultFtpServerContext context = new DefaultFtpServerContext(false);
+      MinaListener listener = new MinaListener();
+      // Set port to 0 for OS to give a free port
+      listener.setPort(0);
+      context.setListener("default", listener);
+
+      // Create a test user.
+      UserManager userManager = context.getUserManager();
+      BaseUser adminUser = new BaseUser();
+      adminUser.setName("admin");
+      adminUser.setPassword("admin");
+      adminUser.setEnabled(true);
+      adminUser.setAuthorities(new Authority[] { new WritePermission() });
+
+      Path adminUserHome = new Path(ftpServerRoot, "user/admin");
+      adminUser.setHomeDirectory(adminUserHome.toUri().getPath());
+      adminUser.setMaxIdleTime(0);
+      userManager.save(adminUser);
+
+      // Initialize the server and start.
+      server = new FtpServer(context);
+      server.start();
+
+    } catch (Exception e) {
+      throw new RuntimeException("FTP server start-up failed", e);
+    }
+  }
+
+  private void stopServer() {
+    if (server != null) {
+      server.stop();
+    }
+  }
+
+  @Override
+  public void setUp() throws Exception {
+    startServer();
+    defaultConf = new Configuration();
+    localFs = FileSystem.getLocal(defaultConf);
+    ftpServerConfig = new Path(localFs.getWorkingDirectory(), "res");
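+    // Discover the ephemeral port the FTP listener actually bound to (port 0
+    // was requested in startServer()) and point the FTP client at it.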
+    MinaListener listener = (MinaListener) server.getServerContext()
+        .getListener("default");
+    int serverPort = listener.getPort();
+    ftpFs = FileSystem.get(URI.create("ftp://admin:admin@localhost:"
+        + serverPort), defaultConf);
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    localFs.delete(ftpServerRoot, true);
+    localFs.delete(ftpServerConfig, true);
+    localFs.close();
+    ftpFs.close();
+    stopServer();
+  }
+
+  /**
+   * Tests the FTPFileSystem create(), open(), delete(), mkdirs(), rename(),
+   * listStatus() and getStatus() APIs.
+   * 
+   * @throws Exception
+   */
+  public void testReadWrite() throws Exception {
+
+    DFSTestUtil util = new DFSTestUtil("TestFTPFileSystem", 20, 3, 1024 * 1024);
+    localFs.setWorkingDirectory(workDir);
+    Path localData = new Path(workDir, "srcData");
+    Path remoteData = new Path("srcData");
+
+    util.createFiles(localFs, localData.toUri().getPath());
+
+    boolean dataConsistency = util.checkFiles(localFs, localData.getName());
+    assertTrue("Test data corrupted", dataConsistency);
+
+    // Copy files and directories recursively to FTP file system.
+    boolean filesCopied = FileUtil.copy(localFs, localData, ftpFs, remoteData,
+        false, defaultConf);
+    assertTrue("Copying to FTPFileSystem failed", filesCopied);
+
+    // Rename the remote copy
+    Path renamedData = new Path("Renamed");
+    boolean renamed = ftpFs.rename(remoteData, renamedData);
+    assertTrue("Rename failed", renamed);
+
+    // Copy files and directories from FTP file system and delete remote copy.
+    filesCopied = FileUtil.copy(ftpFs, renamedData, localFs, workDir, true,
+        defaultConf);
+    assertTrue("Copying from FTPFileSystem fails", filesCopied);
+
+    // Check if the data was received completely without any corruption.
+    dataConsistency = util.checkFiles(localFs, renamedData.getName());
+    assertTrue("Invalid or corrupted data recieved from FTP Server!",
+        dataConsistency);
+
+    // Delete local copies
+    boolean deleteSuccess = localFs.delete(renamedData, true)
+        & localFs.delete(localData, true);
+    assertTrue("Local test data deletion failed", deleteSuccess);
+  }
+}

+ 252 - 0
src/test/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java

@@ -0,0 +1,252 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.loadGenerator;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.FileWriter;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+
+import junit.framework.TestCase;
+/**
+ * This class tests the load generator tools: StructureGenerator, DataGenerator and LoadGenerator.
+ */
+public class TestLoadGenerator extends TestCase {
+  private static final Configuration CONF = new Configuration();
+  private static final int DEFAULT_BLOCK_SIZE = 10;
+  private static final String OUT_DIR = 
+    System.getProperty("test.build.data","build/test/data");
+  private static final File DIR_STRUCTURE_FILE = 
+    new File(OUT_DIR, StructureGenerator.DIR_STRUCTURE_FILE_NAME);
+  private static final File FILE_STRUCTURE_FILE =
+    new File(OUT_DIR, StructureGenerator.FILE_STRUCTURE_FILE_NAME);
+  private static final String DIR_STRUCTURE_FIRST_LINE = "/dir0";
+  private static final String DIR_STRUCTURE_SECOND_LINE = "/dir1";
+  private static final String FILE_STRUCTURE_FIRST_LINE =
+    "/dir0/_file_0 0.3754598635933768";
+  private static final String FILE_STRUCTURE_SECOND_LINE =
+    "/dir1/_file_1 1.4729310851145203";
+  
+
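+  // A tiny block size (with matching bytes-per-checksum) and a short heartbeat
+  // interval keep the mini cluster fast while still producing multi-block files.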
+  static {
+    CONF.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    CONF.setInt("io.bytes.per.checksum", DEFAULT_BLOCK_SIZE);
+    CONF.setLong("dfs.heartbeat.interval", 1L);
+  }
+
+  /** Test if the structure generator works fine */ 
+  public void testStructureGenerator() throws Exception {
+    StructureGenerator sg = new StructureGenerator();
+    String[] args = new String[]{"-maxDepth", "2", "-minWidth", "1",
+        "-maxWidth", "2", "-numOfFiles", "2",
+        "-avgFileSize", "1", "-outDir", OUT_DIR, "-seed", "1"};
+    
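+    // Indices of the option *values* inside the args array above; the
+    // negative test cases below overwrite these slots with invalid values.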
+    final int MAX_DEPTH = 1;
+    final int MIN_WIDTH = 3;
+    final int MAX_WIDTH = 5;
+    final int NUM_OF_FILES = 7;
+    final int AVG_FILE_SIZE = 9;
+    final int SEED = 13;
+    try {
+      // successful case
+      assertEquals(0, sg.run(args));
+      BufferedReader in = new BufferedReader(new FileReader(DIR_STRUCTURE_FILE));
+      assertEquals(DIR_STRUCTURE_FIRST_LINE, in.readLine());
+      assertEquals(DIR_STRUCTURE_SECOND_LINE, in.readLine());
+      assertEquals(null, in.readLine());
+      in.close();
+      
+      in = new BufferedReader(new FileReader(FILE_STRUCTURE_FILE));
+      assertEquals(FILE_STRUCTURE_FIRST_LINE, in.readLine());
+      assertEquals(FILE_STRUCTURE_SECOND_LINE, in.readLine());
+      assertEquals(null, in.readLine());
+      in.close();
+
+      String oldArg = args[MAX_DEPTH];
+      args[MAX_DEPTH] = "0";
+      assertEquals(-1, sg.run(args));
+      args[MAX_DEPTH] = oldArg;
+      
+      oldArg = args[MIN_WIDTH];
+      args[MIN_WIDTH] = "-1";
+      assertEquals(-1, sg.run(args));
+      args[MIN_WIDTH] = oldArg;
+      
+      oldArg = args[MAX_WIDTH];
+      args[MAX_WIDTH] = "-1";
+      assertEquals(-1, sg.run(args));
+      args[MAX_WIDTH] = oldArg;
+      
+      oldArg = args[NUM_OF_FILES];
+      args[NUM_OF_FILES] = "-1";
+      assertEquals(-1, sg.run(args));
+      args[NUM_OF_FILES] = oldArg;
+      
+      oldArg = args[AVG_FILE_SIZE];
+      args[AVG_FILE_SIZE] = "-1";
+      assertEquals(-1, sg.run(args));
+      args[AVG_FILE_SIZE] = oldArg;
+      
+      oldArg = args[SEED];
+      args[SEED] = "34.d4";
+      assertEquals(-1, sg.run(args));
+      args[SEED] = oldArg;
+    } finally {
+      DIR_STRUCTURE_FILE.delete();
+      FILE_STRUCTURE_FILE.delete();
+    }
+  }
+
+  /** Test if the load generator works fine */
+  public void testLoadGenerator() throws Exception {
+    final String TEST_SPACE_ROOT = "/test";
+
+    final String SCRIPT_TEST_DIR = new File(System.getProperty(
+        "test.build.data", "/tmp")).getAbsolutePath();
+    String script = SCRIPT_TEST_DIR + "/" + "loadgenscript";
+    String script2 = SCRIPT_TEST_DIR + "/" + "loadgenscript2";
+    File scriptFile1 = new File(script);
+    File scriptFile2 = new File(script2);
+    
+    FileWriter writer = new FileWriter(DIR_STRUCTURE_FILE);
+    writer.write(DIR_STRUCTURE_FIRST_LINE+"\n");
+    writer.write(DIR_STRUCTURE_SECOND_LINE+"\n");
+    writer.close();
+    
+    writer = new FileWriter(FILE_STRUCTURE_FILE);
+    writer.write(FILE_STRUCTURE_FIRST_LINE+"\n");
+    writer.write(FILE_STRUCTURE_SECOND_LINE+"\n");
+    writer.close();
+    
+    MiniDFSCluster cluster = new MiniDFSCluster(CONF, 3, true, null);
+    cluster.waitActive();
+    
+    try {
+      DataGenerator dg = new DataGenerator();
+      dg.setConf(CONF);
+      String [] args = new String[] {"-inDir", OUT_DIR, "-root", TEST_SPACE_ROOT};
+      assertEquals(0, dg.run(args));
+
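+      // Indices of the option *values* inside the LoadGenerator args array
+      // built below; the negative test cases overwrite these slots.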
+      final int READ_PROBABILITY = 1;
+      final int WRITE_PROBABILITY = 3;
+      final int MAX_DELAY_BETWEEN_OPS = 7;
+      final int NUM_OF_THREADS = 9;
+      final int START_TIME = 11;
+      final int ELAPSED_TIME = 13;
+      
+      LoadGenerator lg = new LoadGenerator();
+      lg.setConf(CONF);
+      args = new String[] {"-readProbability", "0.3", "-writeProbability", "0.3",
+          "-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0",
+          "-numOfThreads", "1", "-startTime", 
+          Long.toString(System.currentTimeMillis()), "-elapsedTime", "10"};
+      
+      assertEquals(0, lg.run(args));
+
+      String oldArg = args[READ_PROBABILITY];
+      args[READ_PROBABILITY] = "1.1";
+      assertEquals(-1, lg.run(args));
+      args[READ_PROBABILITY] = "-1.1";
+      assertEquals(-1, lg.run(args));
+      args[READ_PROBABILITY] = oldArg;
+
+      oldArg = args[WRITE_PROBABILITY];
+      args[WRITE_PROBABILITY] = "1.1";
+      assertEquals(-1, lg.run(args));
+      args[WRITE_PROBABILITY] = "-1.1";
+      assertEquals(-1, lg.run(args));
+      args[WRITE_PROBABILITY] = "0.9";
+      assertEquals(-1, lg.run(args));
+      args[WRITE_PROBABILITY] = oldArg;
+
+      oldArg = args[MAX_DELAY_BETWEEN_OPS];
+      args[MAX_DELAY_BETWEEN_OPS] = "1.x1";
+      assertEquals(-1, lg.run(args));
+      args[MAX_DELAY_BETWEEN_OPS] = oldArg;
+      
+      oldArg = args[NUM_OF_THREADS];
+      args[NUM_OF_THREADS] = "-1";
+      assertEquals(-1, lg.run(args));
+      args[NUM_OF_THREADS] = oldArg;
+      
+      oldArg = args[START_TIME];
+      args[START_TIME] = "-1";
+      assertEquals(-1, lg.run(args));
+      args[START_TIME] = oldArg;
+
+      oldArg = args[ELAPSED_TIME];
+      args[ELAPSED_TIME] = "-1";
+      assertEquals(-1, lg.run(args));
+      args[ELAPSED_TIME] = oldArg;
+      
+      // test scripted operation
+      // Test with good script
+      FileWriter fw = new FileWriter(scriptFile1);
+      fw.write("2 .22 .33\n");
+      fw.write("3 .10 .6\n");
+      fw.write("6 0 .7\n");
+      fw.close();
+      
+      String[] scriptArgs = new String[] {
+          "-root", TEST_SPACE_ROOT, "-maxDelayBetweenOps", "0",
+          "-numOfThreads", "10", "-startTime", 
+          Long.toString(System.currentTimeMillis()), "-scriptFile", script};
+      
+      assertEquals(0, lg.run(scriptArgs));
+      
+      // Test with bad script
+      fw = new FileWriter(scriptFile2);
+      fw.write("2 .22 .33\n");
+      fw.write("3 blah blah blah .6\n");
+      fw.write("6 0 .7\n");
+      fw.close();
+      
+      scriptArgs[scriptArgs.length - 1] = script2;
+      assertEquals(-1, lg.run(scriptArgs));
+      
+    } finally {
+      cluster.shutdown();
+      DIR_STRUCTURE_FILE.delete();
+      FILE_STRUCTURE_FILE.delete();
+      scriptFile1.delete();
+      scriptFile2.delete();
+    }
+  }
+  
+  /**
+   * @param args
+   */
+  public static void main(String[] args) throws Exception {
+    TestLoadGenerator loadGeneratorTest = new TestLoadGenerator();
+    loadGeneratorTest.testStructureGenerator();
+    loadGeneratorTest.testLoadGenerator();
+  }
+}

+ 311 - 0
src/test/org/apache/hadoop/fs/permission/TestStickyBit.java

@@ -0,0 +1,311 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.permission;
+
+import java.io.IOException;
+
+import javax.security.auth.login.LoginException;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+
+public class TestStickyBit extends TestCase {
+
+  static UnixUserGroupInformation user1 = new UnixUserGroupInformation(
+      "theDoctor", new String[] { "tardis" });
+  static UnixUserGroupInformation user2 = new UnixUserGroupInformation("rose",
+      new String[] { "powellestates" });
+
+  /**
+   * Ensure that even if a file is in a directory with the sticky bit on,
+   * another user can write to that file (assuming correct permissions).
+   */
+  private void confirmCanAppend(Configuration conf, FileSystem hdfs,
+      Path baseDir) throws IOException {
+    // Create a tmp directory with wide-open permissions and sticky bit
+    Path p = new Path(baseDir, "tmp");
+
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+
+    // Write a file to the new tmp directory as a regular user
+    hdfs = logonAs(user1, conf, hdfs);
+    Path file = new Path(p, "foo");
+    writeFile(hdfs, file);
+    hdfs.setPermission(file, new FsPermission((short) 0777));
+
+    // Log onto cluster as another user and attempt to append to file
+    hdfs = logonAs(user2, conf, hdfs);
+    Path file2 = new Path(p, "foo");
+    FSDataOutputStream h = hdfs.append(file2);
+    h.write("Some more data".getBytes());
+    h.close();
+  }
+
+  /**
+   * Test that one user can't delete another user's file when the sticky bit is
+   * set.
+   */
+  private void confirmDeletingFiles(Configuration conf, FileSystem hdfs,
+      Path baseDir) throws IOException {
+    Path p = new Path(baseDir, "contemporary");
+    hdfs.mkdirs(p);
+    hdfs.setPermission(p, new FsPermission((short) 01777));
+
+    // Write a file to the new temp directory as a regular user
+    hdfs = logonAs(user1, conf, hdfs);
+    Path file = new Path(p, "foo");
+    writeFile(hdfs, file);
+
+    // Make sure the correct user is the owner
+    assertEquals(user1.getUserName(), hdfs.getFileStatus(file).getOwner());
+
+    // Log onto cluster as another user and attempt to delete the file
+    FileSystem hdfs2 = logonAs(user2, conf, hdfs);
+
+    try {
+      hdfs2.delete(file, false);
+      fail("Shouldn't be able to delete someone else's file with SB on");
+    } catch (IOException ioe) {
+      assertTrue(ioe instanceof AccessControlException);
+      assertTrue(ioe.getMessage().contains("sticky bit"));
+    }
+  }
+
+  /**
+   * Test that if a directory is created in a directory that has the sticky bit
+   * on, the new directory does not automatically get a sticky bit, as is
+   * standard Unix behavior
+   */
+  private void confirmStickyBitDoesntPropagate(FileSystem hdfs, Path baseDir)
+      throws IOException {
+    Path p = new Path(baseDir, "scissorsisters");
+
+    // Turn on its sticky bit
+    hdfs.mkdirs(p, new FsPermission((short) 01666));
+
+    // Create a subdirectory within it
+    Path p2 = new Path(p, "bar");
+    hdfs.mkdirs(p2);
+
+    // Ensure new directory doesn't have its sticky bit on
+    assertFalse(hdfs.getFileStatus(p2).getPermission().getStickyBit());
+  }
+
+  /**
+   * Test basic ability to get and set sticky bits on files and directories.
+   */
+  private void confirmSettingAndGetting(FileSystem hdfs, Path baseDir)
+      throws IOException {
+    Path p1 = new Path(baseDir, "roguetraders");
+
+    hdfs.mkdirs(p1);
+
+    // Initially sticky bit should not be set
+    assertFalse(hdfs.getFileStatus(p1).getPermission().getStickyBit());
+
+    // Same permission, but with sticky bit on
+    short withSB;
+    withSB = (short) (hdfs.getFileStatus(p1).getPermission().toShort() | 01000);
+
+    assertTrue((new FsPermission(withSB)).getStickyBit());
+
+    hdfs.setPermission(p1, new FsPermission(withSB));
+    assertTrue(hdfs.getFileStatus(p1).getPermission().getStickyBit());
+
+    // However, while you can set the sticky bit on files, it has no effect,
+    // following the linux/unix model:
+    //
+    // [user@host test]$ ls -alh
+    // -rw-r--r-- 1 user users 0 Dec 31 01:46 aFile
+    // [user@host test]$ chmod +t aFile
+    // [user@host test]$ ls -alh
+    // -rw-r--r-- 1 user users 0 Dec 31 01:46 aFile
+
+    // Write a file to the fs, try to set its sticky bit, expect to be ignored
+    Path f = new Path(baseDir, "somefile");
+    writeFile(hdfs, f);
+    assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());
+
+    withSB = (short) (hdfs.getFileStatus(f).getPermission().toShort() | 01000);
+
+    hdfs.setPermission(f, new FsPermission(withSB));
+
+    assertFalse(hdfs.getFileStatus(f).getPermission().getStickyBit());
+  }
+
+  public void testGeneralSBBehavior() throws IOException {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setBoolean("dfs.permissions", true);
+      conf.setBoolean("dfs.support.append", true);
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+
+      FileSystem hdfs = cluster.getFileSystem();
+
+      assertTrue(hdfs instanceof DistributedFileSystem);
+
+      Path baseDir = new Path("/mcgann");
+      hdfs.mkdirs(baseDir);
+      confirmCanAppend(conf, hdfs, baseDir);
+
+      baseDir = new Path("/eccleston");
+      hdfs.mkdirs(baseDir);
+      confirmSettingAndGetting(hdfs, baseDir);
+
+      baseDir = new Path("/tennant");
+      hdfs.mkdirs(baseDir);
+      confirmDeletingFiles(conf, hdfs, baseDir);
+
+      baseDir = new Path("/smith");
+      hdfs.mkdirs(baseDir);
+      confirmStickyBitDoesntPropagate(hdfs, baseDir);
+
+    } finally {
+      if (cluster != null)
+        cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test that one user can't rename/move another user's file when the sticky
+   * bit is set.
+   */
+  public void testMovingFiles() throws IOException, LoginException {
+    MiniDFSCluster cluster = null;
+
+    try {
+      // Set up cluster for testing
+      Configuration conf = new Configuration();
+      conf.setBoolean("dfs.permissions", true);
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+      FileSystem hdfs = cluster.getFileSystem();
+
+      assertTrue(hdfs instanceof DistributedFileSystem);
+
+      // Create a tmp directory with wide-open permissions and sticky bit
+      Path tmpPath = new Path("/tmp");
+      Path tmpPath2 = new Path("/tmp2");
+      hdfs.mkdirs(tmpPath);
+      hdfs.mkdirs(tmpPath2);
+      hdfs.setPermission(tmpPath, new FsPermission((short) 01777));
+      hdfs.setPermission(tmpPath2, new FsPermission((short) 01777));
+
+      // Write a file to the new tmp directory as a regular user
+      Path file = new Path(tmpPath, "foo");
+
+      FileSystem hdfs2 = logonAs(user1, conf, hdfs);
+
+      writeFile(hdfs2, file);
+
+      // Log onto cluster as another user and attempt to move the file
+      FileSystem hdfs3 = logonAs(user2, conf, hdfs);
+
+      try {
+        hdfs3.rename(file, new Path(tmpPath2, "renamed"));
+        fail("Shouldn't be able to rename someone else's file with SB on");
+      } catch (IOException ioe) {
+        assertTrue(ioe instanceof AccessControlException);
+        assertTrue(ioe.getMessage().contains("sticky bit"));
+      }
+    } finally {
+      if (cluster != null)
+        cluster.shutdown();
+    }
+  }
+
+  /**
+   * Ensure that when we set a sticky bit and shut down the file system, we get
+   * the sticky bit back on re-start, and that no extra sticky bits appear after
+   * re-start.
+   */
+  public void testStickyBitPersistence() throws IOException {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setBoolean("dfs.permissions", true);
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+      FileSystem hdfs = cluster.getFileSystem();
+
+      assertTrue(hdfs instanceof DistributedFileSystem);
+
+      // A tale of three directories...
+      Path sbSet = new Path("/Housemartins");
+      Path sbNotSpecified = new Path("/INXS");
+      Path sbSetOff = new Path("/Easyworld");
+
+      for (Path p : new Path[] { sbSet, sbNotSpecified, sbSetOff })
+        hdfs.mkdirs(p);
+
+      // Two directories have their sticky bits set explicitly...
+      hdfs.setPermission(sbSet, new FsPermission((short) 01777));
+      hdfs.setPermission(sbSetOff, new FsPermission((short) 00777));
+
+      cluster.shutdown();
+
+      // Start file system up again
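+      // (format=false, so the existing namespace and its permission bits are kept)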
+      cluster = new MiniDFSCluster(conf, 4, false, null);
+      hdfs = cluster.getFileSystem();
+
+      assertTrue(hdfs.exists(sbSet));
+      assertTrue(hdfs.getFileStatus(sbSet).getPermission().getStickyBit());
+
+      assertTrue(hdfs.exists(sbNotSpecified));
+      assertFalse(hdfs.getFileStatus(sbNotSpecified).getPermission()
+          .getStickyBit());
+
+      assertTrue(hdfs.exists(sbSetOff));
+      assertFalse(hdfs.getFileStatus(sbSetOff).getPermission().getStickyBit());
+
+    } finally {
+      if (cluster != null)
+        cluster.shutdown();
+    }
+  }
+
+  /***
+   * Create a new configuration for the specified user and return a filesystem
+   * accessed by that user
+   */
+  static private FileSystem logonAs(UnixUserGroupInformation user,
+      Configuration conf, FileSystem hdfs) throws IOException {
+    Configuration conf2 = new Configuration(conf);
+    UnixUserGroupInformation.saveToConf(conf2,
+        UnixUserGroupInformation.UGI_PROPERTY_NAME, user);
+
+    return FileSystem.get(conf2);
+  }
+
+  /***
+   * Write a quick file to the specified file system at specified path
+   */
+  static private void writeFile(FileSystem hdfs, Path p) throws IOException {
+    FSDataOutputStream o = hdfs.create(p);
+    o.write("some file contents".getBytes());
+    o.close();
+  }
+}
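
For readers unfamiliar with how HDFS encodes the sticky bit, here is a minimal, hypothetical sketch (not part of the patch) using only the FsPermission calls exercised above; it assumes, as the test does, that the octal short constructor carries the sticky bit through to getStickyBit():

    import org.apache.hadoop.fs.permission.FsPermission;

    public class StickyBitSketch {                          // hypothetical class name
      public static void main(String[] args) {
        // 01777: rwxrwxrwx with the sticky bit in the leading octal digit
        FsPermission withSticky = new FsPermission((short) 01777);
        // 00777: same access bits, sticky bit cleared
        FsPermission withoutSticky = new FsPermission((short) 00777);
        System.out.println(withSticky.getStickyBit());      // expected: true
        System.out.println(withoutSticky.getStickyBit());   // expected: false
      }
    }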

+ 119 - 0
src/test/org/apache/hadoop/hdfs/AppendTestUtil.java

@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/** Utilities for append-related tests */ 
+class AppendTestUtil {
+  /** For specifying the random number generator seed,
+   *  change the following value:
+   */
+  static final Long RANDOM_NUMBER_GENERATOR_SEED = null;
+
+  static final Log LOG = LogFactory.getLog(AppendTestUtil.class);
+
+  private static final Random SEED = new Random();
+  static {
+    final long seed = RANDOM_NUMBER_GENERATOR_SEED == null?
+        SEED.nextLong(): RANDOM_NUMBER_GENERATOR_SEED;
+    LOG.info("seed=" + seed);
+    SEED.setSeed(seed);
+  }
+
+  private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
+    protected Random initialValue() {
+      final Random r =  new Random();
+      synchronized(SEED) { 
+        final long seed = SEED.nextLong();
+        r.setSeed(seed);
+        LOG.info(Thread.currentThread().getName() + ": seed=" + seed);
+      }
+      return r;
+    }
+  };
+  
+  static int nextInt() {return RANDOM.get().nextInt();}
+  static int nextInt(int n) {return RANDOM.get().nextInt(n);}
+  static long nextLong() {return RANDOM.get().nextLong();}
+
+  static byte[] randomBytes(long seed, int size) {
+    LOG.info("seed=" + seed + ", size=" + size);
+    final byte[] b = new byte[size];
+    final Random rand = new Random(seed);
+    rand.nextBytes(b);
+    return b;
+  }
+
+  static void sleep(long ms) {
+    try {
+      Thread.sleep(ms);
+    } catch (InterruptedException e) {
+      LOG.info("ms=" + ms, e);
+    }
+  }
+
+  static FileSystem createHdfsWithDifferentUsername(Configuration conf
+      ) throws IOException {
+    Configuration conf2 = new Configuration(conf);
+    String username = UserGroupInformation.getCurrentUGI().getUserName()+"_XXX";
+    UnixUserGroupInformation.saveToConf(conf2,
+        UnixUserGroupInformation.UGI_PROPERTY_NAME,
+        new UnixUserGroupInformation(username, new String[]{"supergroup"}));
+    return FileSystem.get(conf2);
+  }
+
+  static void write(OutputStream out, int offset, int length) throws IOException {
+    final byte[] bytes = new byte[length];
+    for(int i = 0; i < length; i++) {
+      bytes[i] = (byte)(offset + i);
+    }
+    out.write(bytes);
+  }
+  
+  static void check(FileSystem fs, Path p, long length) throws IOException {
+    int i = -1;
+    try {
+      final FileStatus status = fs.getFileStatus(p);
+      TestCase.assertEquals(length, status.getLen());
+      InputStream in = fs.open(p);
+      for(i++; i < length; i++) {
+        TestCase.assertEquals((byte)i, (byte)in.read());  
+      }
+      i = -(int)length;
+      TestCase.assertEquals(-1, in.read()); //EOF  
+      in.close();
+    } catch(IOException ioe) {
+      throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
+    }
+  }
+}
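
As a usage illustration, write() and check() are meant to be used as a pair: write() emits bytes whose value equals their file offset, and check() re-reads the file and asserts both its length and its contents. A minimal, hypothetical test-style sketch (same package as AppendTestUtil; names invented here):

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class AppendTestUtilUsage {                        // hypothetical helper
      static void writeThenVerify(FileSystem fs, Path p) throws java.io.IOException {
        FSDataOutputStream out = fs.create(p);
        AppendTestUtil.write(out, 0, 1024);            // bytes 0..1023, value == offset
        out.close();
        AppendTestUtil.check(fs, p, 1024);             // asserts length and contents
      }
    }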

+ 234 - 0
src/test/org/apache/hadoop/hdfs/BenchmarkThroughput.java

@@ -0,0 +1,234 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.ChecksumFileSystem;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+import org.apache.log4j.Level;
+
+/**
+ * This class benchmarks the performance of the local file system, raw local
+ * file system and HDFS at reading and writing files. The user should invoke
+ * the main of this class and optionally include a repetition count.
+ */
+public class BenchmarkThroughput extends Configured implements Tool {
+
+  // allocates local paths under the working directory specified in the config
+  private LocalDirAllocator dir;
+  private long startTime;
+  // the size of the buffer to use
+  private int BUFFER_SIZE;
+
+  private void resetMeasurements() {
+    startTime = System.currentTimeMillis();
+  }
+
+  private void printMeasurements() {
+    System.out.println(" time: " +
+                       ((System.currentTimeMillis() - startTime)/1000));
+  }
+
+  private Path writeLocalFile(String name, Configuration conf,
+                                     long total) throws IOException {
+    Path path = dir.getLocalPathForWrite(name, total, conf);
+    System.out.print("Writing " + name);
+    resetMeasurements();
+    OutputStream out = new FileOutputStream(new File(path.toString()));
+    byte[] data = new byte[BUFFER_SIZE];
+    for(long size=0; size < total; size += BUFFER_SIZE) {
+      out.write(data);
+    }
+    out.close();
+    printMeasurements();
+    return path;
+  }
+
+  private void readLocalFile(Path path,
+                                    String name,
+                                    Configuration conf) throws IOException {
+    System.out.print("Reading " + name);
+    resetMeasurements();
+    InputStream in = new FileInputStream(new File(path.toString()));
+    byte[] data = new byte[BUFFER_SIZE];
+    long size = 0;
+    while (size >= 0) {
+      size = in.read(data);
+    }
+    in.close();
+    printMeasurements();
+  }
+
+  private void writeAndReadLocalFile(String name,
+                                            Configuration conf,
+                                            long size
+                                           ) throws IOException {
+    Path f = null;
+    try {
+      f = writeLocalFile(name, conf, size);
+      readLocalFile(f, name, conf);
+    } finally {
+      if (f != null) {
+        new File(f.toString()).delete();
+      }
+    }
+  }
+
+  private Path writeFile(FileSystem fs,
+                                String name,
+                                Configuration conf,
+                                long total
+                                ) throws IOException {
+    Path f = dir.getLocalPathForWrite(name, total, conf);
+    System.out.print("Writing " + name);
+    resetMeasurements();
+    OutputStream out = fs.create(f);
+    byte[] data = new byte[BUFFER_SIZE];
+    for(long size = 0; size < total; size += BUFFER_SIZE) {
+      out.write(data);
+    }
+    out.close();
+    printMeasurements();
+    return f;
+  }
+
+  private void readFile(FileSystem fs,
+                               Path f,
+                               String name,
+                               Configuration conf
+                               ) throws IOException {
+    System.out.print("Reading " + name);
+    resetMeasurements();
+    InputStream in = fs.open(f);
+    byte[] data = new byte[BUFFER_SIZE];
+    long val = 0;
+    while (val >= 0) {
+      val = in.read(data);
+    }
+    in.close();
+    printMeasurements();
+  }
+
+  private void writeAndReadFile(FileSystem fs,
+                                       String name,
+                                       Configuration conf,
+                                       long size
+                                       ) throws IOException {
+    Path f = null;
+    try {
+      f = writeFile(fs, name, conf, size);
+      readFile(fs, f, name, conf);
+    } finally {
+      try {
+        if (f != null) {
+          fs.delete(f, true);
+        }
+      } catch (IOException ie) {
+        // IGNORE
+      }
+    }
+  }
+
+  private static void printUsage() {
+    ToolRunner.printGenericCommandUsage(System.err);
+    System.err.println("Usage: dfsthroughput [#reps]");
+    System.err.println("Config properties:\n" +
+      "  dfsthroughput.file.size:\tsize of each write/read (10GB)\n" +
+      "  dfsthroughput.buffer.size:\tbuffer size for write/read (4k)\n");
+  }
+
+  public int run(String[] args) throws IOException {
+    // silence the minidfs cluster
+    Log hadoopLog = LogFactory.getLog("org");
+    if (hadoopLog instanceof Log4JLogger) {
+      ((Log4JLogger) hadoopLog).getLogger().setLevel(Level.WARN);
+    }
+    int reps = 1;
+    if (args.length == 1) {
+      try {
+        reps = Integer.parseInt(args[0]);
+      } catch (NumberFormatException e) {
+        printUsage();
+        return -1;
+      }
+    } else if (args.length > 1) {
+      printUsage();
+      return -1;
+    }
+    Configuration conf = getConf();
+    // the size of the file to write
+    long SIZE = conf.getLong("dfsthroughput.file.size",
+        10L * 1024 * 1024 * 1024);
+    BUFFER_SIZE = conf.getInt("dfsthroughput.buffer.size", 4 * 1024);
+
+    String localDir = conf.get("mapred.temp.dir");
+    dir = new LocalDirAllocator("mapred.temp.dir");
+
+    System.setProperty("test.build.data", localDir);
+    System.out.println("Local = " + localDir);
+    ChecksumFileSystem checkedLocal = FileSystem.getLocal(conf);
+    FileSystem rawLocal = checkedLocal.getRawFileSystem();
+    for(int i=0; i < reps; ++i) {
+      writeAndReadLocalFile("local", conf, SIZE);
+      writeAndReadFile(rawLocal, "raw", conf, SIZE);
+      writeAndReadFile(checkedLocal, "checked", conf, SIZE);
+    }
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster(conf, 1, true, new String[]{"/foo"});
+      cluster.waitActive();
+      FileSystem dfs = cluster.getFileSystem();
+      for(int i=0; i < reps; ++i) {
+        writeAndReadFile(dfs, "dfs", conf, SIZE);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+        // clean up minidfs junk
+        rawLocal.delete(new Path(localDir, "dfs"), true);
+      }
+    }
+    return 0;
+  }
+
+  /**
+   * @param args
+   */
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(new Configuration(),
+        new BenchmarkThroughput(), args);
+    System.exit(res);
+  }
+
+}
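
Since the default write/read size is 10GB per repetition, a quick smoke run usually overrides the two config properties read in run(). A hypothetical harness (class name and local directory are assumptions, not part of the patch), assuming it lives in or imports the org.apache.hadoop.hdfs package:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.ToolRunner;

    public class BenchmarkThroughputSmokeTest {        // hypothetical
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("mapred.temp.dir", "/tmp/dfsthroughput");          // must be a writable local dir
        conf.setLong("dfsthroughput.file.size", 10L * 1024 * 1024); // 10MB instead of 10GB
        conf.setInt("dfsthroughput.buffer.size", 4 * 1024);
        int res = ToolRunner.run(conf, new BenchmarkThroughput(), new String[]{"1"});
        System.exit(res);
      }
    }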

+ 284 - 0
src/test/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.BufferedReader;
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.net.URL;
+import java.net.URLConnection;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/** Utilities for HDFS tests */
+public class DFSTestUtil {
+  
+  private static Random gen = new Random();
+  private static String[] dirNames = {
+    "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"
+  };
+  
+  private int maxLevels;// = 3;
+  private int maxSize;// = 8*1024;
+  private int nFiles;
+  private MyFile[] files;
+  
+  /** Creates a new instance of DFSTestUtil
+   *
+   * @param testName Name of the test from where this utility is used
+   * @param nFiles Number of files to be created
+   * @param maxLevels Maximum number of directory levels
+   * @param maxSize Maximum size for file
+   */
+  public DFSTestUtil(String testName, int nFiles, int maxLevels, int maxSize) {
+    this.nFiles = nFiles;
+    this.maxLevels = maxLevels;
+    this.maxSize = maxSize;
+  }
+  
+  /** class MyFile contains enough information to recreate the contents of
+   * a single file.
+   */
+  private class MyFile {
+    
+    private String name = "";
+    private int size;
+    private long seed;
+    
+    MyFile() {
+      int nLevels = gen.nextInt(maxLevels);
+      if (nLevels != 0) {
+        int[] levels = new int[nLevels];
+        for (int idx = 0; idx < nLevels; idx++) {
+          levels[idx] = gen.nextInt(10);
+        }
+        StringBuffer sb = new StringBuffer();
+        for (int idx = 0; idx < nLevels; idx++) {
+          sb.append(dirNames[levels[idx]]);
+          sb.append("/");
+        }
+        name = sb.toString();
+      }
+      long fidx = -1;
+      while (fidx < 0) { fidx = gen.nextLong(); }
+      name = name + Long.toString(fidx);
+      size = gen.nextInt(maxSize);
+      seed = gen.nextLong();
+    }
+    
+    String getName() { return name; }
+    int getSize() { return size; }
+    long getSeed() { return seed; }
+  }
+
+  public void createFiles(FileSystem fs, String topdir) throws IOException {
+    createFiles(fs, topdir, (short)3);
+  }
+  
+  /** create nFiles with random names and directory hierarchies
+   *  with random (but reproducible) data in them.
+   */
+  void createFiles(FileSystem fs, String topdir,
+                   short replicationFactor) throws IOException {
+    files = new MyFile[nFiles];
+    
+    for (int idx = 0; idx < nFiles; idx++) {
+      files[idx] = new MyFile();
+    }
+    
+    Path root = new Path(topdir);
+    
+    for (int idx = 0; idx < nFiles; idx++) {
+      createFile(fs, new Path(root, files[idx].getName()), files[idx].getSize(),
+          replicationFactor, files[idx].getSeed());
+    }
+  }
+  
+  public static void createFile(FileSystem fs, Path fileName, long fileLen, 
+      short replFactor, long seed) throws IOException {
+    if (!fs.mkdirs(fileName.getParent())) {
+      throw new IOException("Mkdirs failed to create " + 
+                            fileName.getParent().toString());
+    }
+    FSDataOutputStream out = null;
+    try {
+      out = fs.create(fileName, replFactor);
+      byte[] toWrite = new byte[1024];
+      Random rb = new Random(seed);
+      long bytesToWrite = fileLen;
+      while (bytesToWrite>0) {
+        rb.nextBytes(toWrite);
+        int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
+
+        out.write(toWrite, 0, bytesToWriteNext);
+        bytesToWrite -= bytesToWriteNext;
+      }
+      out.close();
+      out = null;
+    } finally {
+      IOUtils.closeStream(out);
+    }
+  }
+  
+  /** check if the files have been copied correctly. */
+  public boolean checkFiles(FileSystem fs, String topdir) throws IOException {
+    
+    //Configuration conf = new Configuration();
+    Path root = new Path(topdir);
+    
+    for (int idx = 0; idx < nFiles; idx++) {
+      Path fPath = new Path(root, files[idx].getName());
+      FSDataInputStream in = fs.open(fPath);
+      byte[] toRead = new byte[files[idx].getSize()];
+      byte[] toCompare = new byte[files[idx].getSize()];
+      Random rb = new Random(files[idx].getSeed());
+      rb.nextBytes(toCompare);
+      in.readFully(0, toRead);
+      in.close();
+      for (int i = 0; i < toRead.length; i++) {
+        if (toRead[i] != toCompare[i]) {
+          return false;
+        }
+      }
+      toRead = null;
+      toCompare = null;
+    }
+    
+    return true;
+  }
+
+  void setReplication(FileSystem fs, String topdir, short value) 
+                                              throws IOException {
+    Path root = new Path(topdir);
+    for (int idx = 0; idx < nFiles; idx++) {
+      Path fPath = new Path(root, files[idx].getName());
+      fs.setReplication(fPath, value);
+    }
+  }
+
+  // waits for the replication factor of all files to reach the
+  // specified target
+  //
+  public void waitReplication(FileSystem fs, String topdir, short value) 
+                                              throws IOException {
+    Path root = new Path(topdir);
+
+    /** wait for the replication factor to settle down */
+    for (int idx = 0; idx < nFiles; idx++) {
+      waitReplication(fs, new Path(root, files[idx].getName()), value);
+    }
+  }
+
+  /** return list of filenames created as part of createFiles */
+  public String[] getFileNames(String topDir) {
+    if (nFiles == 0)
+      return new String[]{};
+    else {
+      String[] fileNames =  new String[nFiles];
+      for (int idx=0; idx < nFiles; idx++) {
+        fileNames[idx] = topDir + "/" + files[idx].getName();
+      }
+      return fileNames;
+    }
+  }
+  
+  /** wait for the file's replication to be done */
+  public static void waitReplication(FileSystem fs, Path fileName, 
+      short replFactor)  throws IOException {
+    boolean good;
+    do {
+      good = true;
+      BlockLocation locs[] = fs.getFileBlockLocations(
+        fs.getFileStatus(fileName), 0, Long.MAX_VALUE);
+      for (int j = 0; j < locs.length; j++) {
+        String[] loc = locs[j].getHosts();
+        if (loc.length != replFactor) {
+          System.out.println("File " + fileName + " has replication factor " +
+              loc.length);
+          good = false;
+          try {
+            System.out.println("Waiting for replication factor to drain");
+            Thread.sleep(100);
+          } catch (InterruptedException e) {} 
+          break;
+        }
+      }
+    } while(!good);
+  }
+  
+  /** delete directory and everything underneath it.*/
+  public void cleanup(FileSystem fs, String topdir) throws IOException {
+    Path root = new Path(topdir);
+    fs.delete(root, true);
+    files = null;
+  }
+  
+  public static Block getFirstBlock(FileSystem fs, Path path) throws IOException {
+    DFSDataInputStream in = 
+      (DFSDataInputStream) ((DistributedFileSystem)fs).open(path);
+    in.readByte();
+    return in.getCurrentBlock();
+  }  
+
+  static void setLogLevel2All(org.apache.commons.logging.Log log) {
+    ((org.apache.commons.logging.impl.Log4JLogger)log
+        ).getLogger().setLevel(org.apache.log4j.Level.ALL);
+  }
+
+  static String readFile(File f) throws IOException {
+    StringBuilder b = new StringBuilder();
+    BufferedReader in = new BufferedReader(new FileReader(f));
+    for(int c; (c = in.read()) != -1; b.append((char)c));
+    in.close();      
+    return b.toString();
+  }
+
+  // Returns url content as string.
+  public static String urlGet(URL url) throws IOException {
+    URLConnection conn = url.openConnection();
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    IOUtils.copyBytes(conn.getInputStream(), out, 4096, true);
+    return out.toString();
+  }
+
+  static public Configuration getConfigurationWithDifferentUsername(Configuration conf
+      ) throws IOException {
+    final Configuration c = new Configuration(conf);
+    final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+    final String username = ugi.getUserName()+"_XXX";
+    final String[] groups = {ugi.getGroupNames()[0] + "_XXX"};
+    UnixUserGroupInformation.saveToConf(c,
+        UnixUserGroupInformation.UGI_PROPERTY_NAME,
+        new UnixUserGroupInformation(username, groups));
+    return c;
+  }
+}
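
Taken together, createFiles()/checkFiles()/cleanup() give a test a reproducible random file tree. A minimal, hypothetical sketch (same package; names and sizes invented here) of how a test might pair them with MiniDFSCluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class DFSTestUtilExample {                  // hypothetical
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
        try {
          FileSystem fs = cluster.getFileSystem();
          DFSTestUtil util = new DFSTestUtil("example", 10, 3, 8 * 1024);
          util.createFiles(fs, "/example");            // 10 files, <= 3 dir levels, <= 8KB each
          if (!util.checkFiles(fs, "/example")) {      // re-reads and compares against the recorded seeds
            throw new IllegalStateException("file contents did not match");
          }
          util.cleanup(fs, "/example");
        } finally {
          cluster.shutdown();
        }
      }
    }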

+ 240 - 0
src/test/org/apache/hadoop/hdfs/DataNodeCluster.java

@@ -0,0 +1,240 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.net.UnknownHostException;
+import java.security.NoSuchAlgorithmException;
+import java.security.SecureRandom;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.namenode.CreateEditsLog;
+import org.apache.hadoop.net.DNS;
+
+
+/**
+ * This program starts a mini cluster of data nodes
+ *  (i.e. a mini cluster without the name node), all within one address space.
+ *  It is assumed that the name node has been started separately prior
+ *  to running this program.
+ *  
+ *  A use case of this is to run a real name node with a large number of
+ *  simulated data nodes for, say, a NN benchmark.
+ *  
+ * Synopsis:
+ *   DataNodeCluster -n numDataNodes [-racks numRacks] -simulated
+ *              [-inject startingBlockId numBlocksPerDN]
+ *              [ -r replicationForInjectedBlocks ]
+ *              [-d dataNodeDirs]
+ *
+ * If -simulated is specified then simulated data nodes are started.
+ * If -inject is specified then blocks are injected in each datanode;
+ *    the -inject option is valid only for simulated data nodes.
+ *    
+ *    See also {@link CreateEditsLog} for creating an edits log file to
+ *    inject a matching set of blocks into a name node.
+ *    Typical use of -inject is to inject blocks into a set of datanodes
+ *    using this DataNodeCluster command
+ *    and then to inject the same blocks into a name node using the
+ *    CreateEditsLog command.
+ *
+ */
+
+public class DataNodeCluster {
+  static final String DATANODE_DIRS = "/tmp/DataNodeCluster";
+  static String dataNodeDirs = DATANODE_DIRS;
+  static final String USAGE =
+    "Usage: datanodecluster " +
+    " -n <numDataNodes> " + 
+    " [-racks <numRacks>] " +
+    " [-simulated] " +
+    " [-inject startingBlockId numBlocksPerDN]" +
+    " [-r replicationFactorForInjectedBlocks]" +
+    " [-d dataNodeDirs]\n" + 
+    "      Default datanode direcory is " + DATANODE_DIRS + "\n" +
+    "      Default replication factor for injected blocks is 1\n" +
+    "      Defaul rack is used if -racks is not specified\n" +
+    "      Data nodes are simulated if -simulated OR conf file specifies simulated\n";
+  
+  
+  static void printUsageExit() {
+    System.out.println(USAGE);
+    System.exit(-1); 
+  }
+  static void printUsageExit(String err) {
+    System.out.println(err);
+    printUsageExit();
+  }
+  
+  public static void main(String[] args) {
+    int numDataNodes = 0;
+    int numRacks = 0;
+    boolean inject = false;
+    long startingBlockId = 1;
+    int numBlocksPerDNtoInject = 0;
+    int replication = 1;
+    
+    Configuration conf = new Configuration();
+
+    for (int i = 0; i < args.length; i++) { // parse command line
+      if (args[i].equals("-n")) {
+        if (++i >= args.length || args[i].startsWith("-")) {
+          printUsageExit("missing number of nodes");
+        }
+        numDataNodes = Integer.parseInt(args[i]);
+      } else if (args[i].equals("-racks")) {
+        if (++i >= args.length  || args[i].startsWith("-")) {
+          printUsageExit("Missing number of racks");
+        }
+        numRacks = Integer.parseInt(args[i]);
+      } else if (args[i].equals("-r")) {
+        if (++i >= args.length || args[i].startsWith("-")) {
+          printUsageExit("Missing replicaiton factor");
+        }
+        replication = Integer.parseInt(args[i]);
+      } else if (args[i].equals("-d")) {
+        if (++i >= args.length || args[i].startsWith("-")) {
+          printUsageExit("Missing datanode dirs parameter");
+        }
+        dataNodeDirs = args[i];
+      } else if (args[i].equals("-simulated")) {
+        conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      } else if (args[i].equals("-inject")) {
+        if (!conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED,
+                                                                false) ) {
+          System.out.print("-inject is valid only for simulated");
+          printUsageExit(); 
+        }
+       inject = true;
+       if (++i >= args.length  || args[i].startsWith("-")) {
+         printUsageExit(
+             "Missing starting block and number of blocks per DN to inject");
+       }
+       startingBlockId = Integer.parseInt(args[i]);
+       if (++i >= args.length  || args[i].startsWith("-")) {
+         printUsageExit("Missing number of blocks to inject");
+       }
+       numBlocksPerDNtoInject = Integer.parseInt(args[i]);      
+      } else {
+        printUsageExit();
+      }
+    }
+    if (numDataNodes <= 0 || replication <= 0 ) {
+      printUsageExit("numDataNodes and replication have to be greater than zero");
+    }
+    if (replication > numDataNodes) {
+      printUsageExit("Replication must be less than or equal to numDataNodes");
+      
+    }
+    String nameNodeAdr = FileSystem.getDefaultUri(conf).getAuthority();
+    if (nameNodeAdr == null) {
+      System.out.println("No name node address and port in config");
+      System.exit(-1);
+    }
+    boolean simulated = 
+      conf.getBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, false);
+    System.out.println("Starting " + numDataNodes + 
+          (simulated ? " Simulated " : " ") +
+          " Data Nodes that will connect to Name Node at " + nameNodeAdr);
+  
+    System.setProperty("test.build.data", dataNodeDirs);
+
+    MiniDFSCluster mc = new MiniDFSCluster();
+    try {
+      mc.formatDataNodeDirs();
+    } catch (IOException e) {
+      System.out.println("Error formating data node dirs:" + e);
+    }
+
+    String[] rack4DataNode = null;
+    if (numRacks > 0) {
+      System.out.println("Using " + numRacks + " racks: ");
+      String rackPrefix = getUniqueRackPrefix();
+
+      rack4DataNode = new String[numDataNodes];
+      for (int i = 0; i < numDataNodes; ++i ) {
+        //rack4DataNode[i] = racks[i%numRacks];
+        rack4DataNode[i] = rackPrefix + "-" + i%numRacks;
+        System.out.println("Data Node " + i + " using " + rack4DataNode[i]);
+        
+        
+      }
+    }
+    try {
+      mc.startDataNodes(conf, numDataNodes, true, StartupOption.REGULAR,
+          rack4DataNode);
+      if (inject) {
+        long blockSize = 10;
+        System.out.println("Injecting " + numBlocksPerDNtoInject +
+            " blocks in each DN starting at blockId " + startingBlockId +
+            " with blocksize of " + blockSize);
+        Block[] blocks = new Block[numBlocksPerDNtoInject];
+        long blkid = startingBlockId;
+        for (int i_dn = 0; i_dn < numDataNodes; ++i_dn) {
+          for (int i = 0; i < blocks.length; ++i) {
+            blocks[i] = new Block(blkid++, blockSize,
+                CreateEditsLog.BLOCK_GENERATION_STAMP);
+          }
+          for (int i = 1; i <= replication; ++i) { 
+            // inject blocks for dn_i into dn_i and replica in dn_i's neighbors 
+            mc.injectBlocks((i_dn + i- 1)% numDataNodes, blocks);
+            System.out.println("Injecting blocks of dn " + i_dn  + " into dn" + 
+                ((i_dn + i- 1)% numDataNodes));
+          }
+        }
+        System.out.println("Created blocks from Bids " 
+            + startingBlockId + " to "  + (blkid -1));
+      }
+
+    } catch (IOException e) {
+      System.out.println("Error creating data node:" + e);
+    }  
+  }
+
+  /*
+   * There is a high probability that the rack id generated here will 
+   * not conflict with those of other data node clusters.
+   * Not perfect, but mostly unique rack ids are good enough.
+   */
+  static private String getUniqueRackPrefix() {
+  
+    String ip = "unknownIP";
+    try {
+      ip = DNS.getDefaultIP("default");
+    } catch (UnknownHostException ignored) {
+      System.out.println("Could not find ip address of \"default\" inteface.");
+    }
+    
+    int rand = 0;
+    try {
+      rand = SecureRandom.getInstance("SHA1PRNG").nextInt(Integer.MAX_VALUE);
+    } catch (NoSuchAlgorithmException e) {
+      rand = (new Random()).nextInt(Integer.MAX_VALUE);
+    }
+    return "/Rack-" + rand + "-"+ ip  + "-" + 
+                      System.currentTimeMillis();
+  }
+}
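
Following the synopsis above, a hypothetical invocation that starts simulated data nodes and injects blocks (class name and argument values are illustrative; note that -simulated must precede -inject on the argument list, and the default file system in the config must already point at the running name node):

    public class DataNodeClusterLauncher {             // hypothetical, same package
      public static void main(String[] args) {
        DataNodeCluster.main(new String[] {
            "-n", "4",                                 // four data nodes
            "-simulated",                              // use SimulatedFSDataset
            "-inject", "1", "10",                      // 10 blocks per DN, starting at block id 1
            "-r", "2"                                  // replication for injected blocks
        });
      }
    }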

+ 848 - 0
src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -0,0 +1,848 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.nio.channels.FileChannel;
+import java.util.Random;
+import java.io.RandomAccessFile;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.*;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.security.*;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * This class creates a single-process DFS cluster for junit testing.
+ * The data directories for non-simulated DFS are under the testing directory.
+ * For simulated data nodes, no underlying fs storage is used.
+ */
+public class MiniDFSCluster {
+
+  public class DataNodeProperties {
+    DataNode datanode;
+    Configuration conf;
+    String[] dnArgs;
+
+    DataNodeProperties(DataNode node, Configuration conf, String[] args) {
+      this.datanode = node;
+      this.conf = conf;
+      this.dnArgs = args;
+    }
+  }
+
+  private Configuration conf;
+  private NameNode nameNode;
+  private int numDataNodes;
+  private ArrayList<DataNodeProperties> dataNodes = 
+                         new ArrayList<DataNodeProperties>();
+  private File base_dir;
+  private File data_dir;
+  
+  
+  /**
+   * This null constructor is used only when wishing to start a data node cluster
+   * without a name node (ie when the name node is started elsewhere).
+   */
+  public MiniDFSCluster() {
+  }
+  
+  /**
+   * Modify the config and start up the servers with the given operation.
+   * Servers will be started on free ports.
+   * <p>
+   * The caller must manage the creation of NameNode and DataNode directories
+   * and have already set dfs.name.dir and dfs.data.dir in the given conf.
+   * 
+   * @param conf the base configuration to use in starting the servers.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param nameNodeOperation the operation with which to start the servers.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   */
+  public MiniDFSCluster(Configuration conf,
+                        int numDataNodes,
+                        StartupOption nameNodeOperation) throws IOException {
+    this(0, conf, numDataNodes, false, false, false,  nameNodeOperation, 
+          null, null, null);
+  }
+  
+  /**
+   * Modify the config and start up the servers.  The rpc and info ports for
+   * servers are guaranteed to use free ports.
+   * <p>
+   * NameNode and DataNode directory creation and configuration will be
+   * managed by this class.
+   *
+   * @param conf the base configuration to use in starting the servers.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param format if true, format the NameNode and DataNodes before starting up
+   * @param racks array of strings indicating the rack that each DataNode is on
+   */
+  public MiniDFSCluster(Configuration conf,
+                        int numDataNodes,
+                        boolean format,
+                        String[] racks) throws IOException {
+    this(0, conf, numDataNodes, format, true, true,  null, racks, null, null);
+  }
+  
+  /**
+   * Modify the config and start up the servers.  The rpc and info ports for
+   * servers are guaranteed to use free ports.
+   * <p>
+   * NameNode and DataNode directory creation and configuration will be
+   * managed by this class.
+   *
+   * @param conf the base configuration to use in starting the servers.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param format if true, format the NameNode and DataNodes before starting up
+   * @param racks array of strings indicating the rack that each DataNode is on
+   * @param hosts array of strings indicating the hostname for each DataNode
+   */
+  public MiniDFSCluster(Configuration conf,
+                        int numDataNodes,
+                        boolean format,
+                        String[] racks, String[] hosts) throws IOException {
+    this(0, conf, numDataNodes, format, true, true, null, racks, hosts, null);
+  }
+  
+  /**
+   * NOTE: if possible, the other constructors that don't have nameNode port 
+   * parameter should be used as they will ensure that the servers use free ports.
+   * <p>
+   * Modify the config and start up the servers.  
+   * 
+   * @param nameNodePort suggestion for which rpc port to use.  caller should
+   *          use getNameNodePort() to get the actual port used.
+   * @param conf the base configuration to use in starting the servers.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param format if true, format the NameNode and DataNodes before starting up
+   * @param manageDfsDirs if true, the data directories for servers will be
+   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   * @param operation the operation with which to start the servers.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   * @param racks array of strings indicating the rack that each DataNode is on
+   */
+  public MiniDFSCluster(int nameNodePort, 
+                        Configuration conf,
+                        int numDataNodes,
+                        boolean format,
+                        boolean manageDfsDirs,
+                        StartupOption operation,
+                        String[] racks) throws IOException {
+    this(nameNodePort, conf, numDataNodes, format, manageDfsDirs, manageDfsDirs,
+         operation, racks, null, null);
+  }
+
+  /**
+   * NOTE: if possible, the other constructors that don't have nameNode port 
+   * parameter should be used as they will ensure that the servers use free ports.
+   * <p>
+   * Modify the config and start up the servers.  
+   * 
+   * @param nameNodePort suggestion for which rpc port to use.  caller should
+   *          use getNameNodePort() to get the actual port used.
+   * @param conf the base configuration to use in starting the servers.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param format if true, format the NameNode and DataNodes before starting up
+   * @param manageDfsDirs if true, the data directories for servers will be
+   *          created and dfs.name.dir and dfs.data.dir will be set in the conf
+   * @param operation the operation with which to start the servers.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   * @param racks array of strings indicating the rack that each DataNode is on
+   * @param simulatedCapacities array of capacities of the simulated data nodes
+   */
+  public MiniDFSCluster(int nameNodePort, 
+                        Configuration conf,
+                        int numDataNodes,
+                        boolean format,
+                        boolean manageDfsDirs,
+                        StartupOption operation,
+                        String[] racks,
+                        long[] simulatedCapacities) throws IOException {
+    this(nameNodePort, conf, numDataNodes, format, manageDfsDirs, manageDfsDirs,
+          operation, racks, null, simulatedCapacities);
+  }
+  
+  /**
+   * NOTE: if possible, the other constructors that don't have nameNode port 
+   * parameter should be used as they will ensure that the servers use free ports.
+   * <p>
+   * Modify the config and start up the servers.  
+   * 
+   * @param nameNodePort suggestion for which rpc port to use.  caller should
+   *          use getNameNodePort() to get the actual port used.
+   * @param conf the base configuration to use in starting the servers.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param format if true, format the NameNode and DataNodes before starting up
+   * @param manageNameDfsDirs if true, the name directories for the NameNode will be
+   *          created and dfs.name.dir and fs.checkpoint.dir will be set in the conf
+   * @param manageDataDfsDirs if true, the data directories for datanodes will
+   *          be created and dfs.data.dir set to same in the conf
+   * @param operation the operation with which to start the servers.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   * @param racks array of strings indicating the rack that each DataNode is on
+   * @param hosts array of strings indicating the hostnames of each DataNode
+   * @param simulatedCapacities array of capacities of the simulated data nodes
+   */
+  public MiniDFSCluster(int nameNodePort, 
+                        Configuration conf,
+                        int numDataNodes,
+                        boolean format,
+                        boolean manageNameDfsDirs,
+                        boolean manageDataDfsDirs,
+                        StartupOption operation,
+                        String[] racks, String hosts[],
+                        long[] simulatedCapacities) throws IOException {
+    this.conf = conf;
+    try {
+      UserGroupInformation.setCurrentUser(UnixUserGroupInformation.login(conf));
+    } catch (LoginException e) {
+      IOException ioe = new IOException();
+      ioe.initCause(e);
+      throw ioe;
+    }
+    base_dir = new File(getBaseDirectory());
+    data_dir = new File(base_dir, "data");
+    
+    // Setup the NameNode configuration
+    FileSystem.setDefaultUri(conf, "hdfs://localhost:"+ Integer.toString(nameNodePort));
+    conf.set("dfs.http.address", "127.0.0.1:0");  
+    if (manageNameDfsDirs) {
+      conf.set("dfs.name.dir", new File(base_dir, "name1").getPath()+","+
+               new File(base_dir, "name2").getPath());
+      conf.set("fs.checkpoint.dir", new File(base_dir, "namesecondary1").
+                getPath()+"," + new File(base_dir, "namesecondary2").getPath());
+    }
+    
+    int replication = conf.getInt("dfs.replication", 3);
+    conf.setInt("dfs.replication", Math.min(replication, numDataNodes));
+    conf.setInt("dfs.safemode.extension", 0);
+    conf.setInt("dfs.namenode.decommission.interval", 3); // 3 second
+    
+    // Format and clean out DataNode directories
+    if (format) {
+      if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
+        throw new IOException("Cannot remove data directory: " + data_dir);
+      }
+      NameNode.format(conf); 
+    }
+    
+    // Start the NameNode
+    String[] args = (operation == null ||
+                     operation == StartupOption.FORMAT ||
+                     operation == StartupOption.REGULAR) ?
+      new String[] {} : new String[] {operation.getName()};
+    conf.setClass("topology.node.switch.mapping.impl", 
+                   StaticMapping.class, DNSToSwitchMapping.class);
+    nameNode = NameNode.createNameNode(args, conf);
+    
+    // Start the DataNodes
+    startDataNodes(conf, numDataNodes, manageDataDfsDirs, 
+                    operation, racks, hosts, simulatedCapacities);
+    waitClusterUp();
+  }
+
+  /**
+   * wait for the cluster to get out of 
+   * safemode.
+   */
+  public void waitClusterUp() {
+    if (numDataNodes > 0) {
+      while (!isClusterUp()) {
+        try {
+          System.err.println("Waiting for the Mini HDFS Cluster to start...");
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+        }
+      }
+    }
+  }
+
+  /**
+   * Modify the config and start up additional DataNodes.  The info port for
+   * DataNodes is guaranteed to use a free port.
+   *  
+   *  Data nodes can run with the name node in the mini cluster or
+   *  a real name node. For example, running with a real name node is useful
+   *  when running simulated data nodes with a real name node.
+   *  If minicluster's name node is null assume that the conf has been
+   *  set with the right address:port of the name node.
+   *
+   * @param conf the base configuration to use in starting the DataNodes.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param manageDfsDirs if true, the data directories for DataNodes will be
+   *          created and dfs.data.dir will be set in the conf
+   * @param operation the operation with which to start the DataNodes.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   * @param racks array of strings indicating the rack that each DataNode is on
+   * @param hosts array of strings indicating the hostnames for each DataNode
+   * @param simulatedCapacities array of capacities of the simulated data nodes
+   *
+   * @throws IllegalStateException if NameNode has been shutdown
+   */
+  public synchronized void startDataNodes(Configuration conf, int numDataNodes, 
+                             boolean manageDfsDirs, StartupOption operation, 
+                             String[] racks, String[] hosts,
+                             long[] simulatedCapacities) throws IOException {
+
+    int curDatanodesNum = dataNodes.size();
+    // for the minicluster, the default initialDelay for block reports (BRs) is 0
+    if (conf.get("dfs.blockreport.initialDelay") == null) {
+      conf.setLong("dfs.blockreport.initialDelay", 0);
+    }
+    // If minicluster's name node is null assume that the conf has been
+    // set with the right address:port of the name node.
+    //
+    if (nameNode != null) { // set conf from the name node
+      InetSocketAddress nnAddr = nameNode.getNameNodeAddress(); 
+      int nameNodePort = nnAddr.getPort(); 
+      FileSystem.setDefaultUri(conf, 
+                               "hdfs://"+ nnAddr.getHostName() +
+                               ":" + Integer.toString(nameNodePort));
+    }
+    
+    if (racks != null && numDataNodes > racks.length ) {
+      throw new IllegalArgumentException( "The length of racks [" + racks.length
+          + "] is less than the number of datanodes [" + numDataNodes + "].");
+    }
+    if (hosts != null && numDataNodes > hosts.length ) {
+      throw new IllegalArgumentException( "The length of hosts [" + hosts.length
+          + "] is less than the number of datanodes [" + numDataNodes + "].");
+    }
+    //Generate some hostnames if required
+    if (racks != null && hosts == null) {
+      System.out.println("Generating host names for datanodes");
+      hosts = new String[numDataNodes];
+      for (int i = curDatanodesNum; i < curDatanodesNum + numDataNodes; i++) {
+        hosts[i - curDatanodesNum] = "host" + i + ".foo.com";
+      }
+    }
+
+    if (simulatedCapacities != null 
+        && numDataNodes > simulatedCapacities.length) {
+      throw new IllegalArgumentException( "The length of simulatedCapacities [" 
+          + simulatedCapacities.length
+          + "] is less than the number of datanodes [" + numDataNodes + "].");
+    }
+
+    // Set up the right ports for the datanodes
+    conf.set("dfs.datanode.address", "127.0.0.1:0");
+    conf.set("dfs.datanode.http.address", "127.0.0.1:0");
+    conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
+    
+
+    String [] dnArgs = (operation == null ||
+                        operation != StartupOption.ROLLBACK) ?
+        null : new String[] {operation.getName()};
+    
+    
+    for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
+      Configuration dnConf = new Configuration(conf);
+      if (manageDfsDirs) {
+        File dir1 = new File(data_dir, "data"+(2*i+1));
+        File dir2 = new File(data_dir, "data"+(2*i+2));
+        dir1.mkdirs();
+        dir2.mkdirs();
+        if (!dir1.isDirectory() || !dir2.isDirectory()) { 
+          throw new IOException("Mkdirs failed to create directory for DataNode "
+                                + i + ": " + dir1 + " or " + dir2);
+        }
+        dnConf.set("dfs.data.dir", dir1.getPath() + "," + dir2.getPath()); 
+      }
+      if (simulatedCapacities != null) {
+        dnConf.setBoolean("dfs.datanode.simulateddatastorage", true);
+        dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
+            simulatedCapacities[i-curDatanodesNum]);
+      }
+      System.out.println("Starting DataNode " + i + " with dfs.data.dir: " 
+                         + dnConf.get("dfs.data.dir"));
+      if (hosts != null) {
+        dnConf.set("slave.host.name", hosts[i - curDatanodesNum]);
+        System.out.println("Starting DataNode " + i + " with hostname set to: " 
+                           + dnConf.get("slave.host.name"));
+      }
+      if (racks != null) {
+        String name = hosts[i - curDatanodesNum];
+        System.out.println("Adding node with hostname : " + name + " to rack "+
+                            racks[i-curDatanodesNum]);
+        StaticMapping.addNodeToRack(name,
+                                    racks[i-curDatanodesNum]);
+      }
+      Configuration newconf = new Configuration(dnConf); // save config
+      if (hosts != null) {
+        NetUtils.addStaticResolution(hosts[i - curDatanodesNum], "localhost");
+      }
+      DataNode dn = DataNode.instantiateDataNode(dnArgs, dnConf);
+      //since the HDFS does things based on IP:port, we need to add the mapping
+      //for IP:port to rackId
+      String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
+      if (racks != null) {
+        int port = dn.getSelfAddr().getPort();
+        System.out.println("Adding node with IP:port : " + ipAddr + ":" + port+
+                            " to rack " + racks[i-curDatanodesNum]);
+        StaticMapping.addNodeToRack(ipAddr + ":" + port,
+                                  racks[i-curDatanodesNum]);
+      }
+      DataNode.runDatanodeDaemon(dn);
+      dataNodes.add(new DataNodeProperties(dn, newconf, dnArgs));
+    }
+    curDatanodesNum += numDataNodes;
+    this.numDataNodes += numDataNodes;
+    waitActive();
+  }
+  
+  
+  
+  /**
+   * Modify the config and start up the DataNodes.  The info port for
+   * DataNodes is guaranteed to use a free port.
+   *
+   * @param conf the base configuration to use in starting the DataNodes.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param manageDfsDirs if true, the data directories for DataNodes will be
+   *          created and dfs.data.dir will be set in the conf
+   * @param operation the operation with which to start the DataNodes.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   * @param racks array of strings indicating the rack that each DataNode is on
+   *
+   * @throws IllegalStateException if NameNode has been shutdown
+   */
+  
+  public void startDataNodes(Configuration conf, int numDataNodes, 
+      boolean manageDfsDirs, StartupOption operation, 
+      String[] racks
+      ) throws IOException {
+    startDataNodes( conf,  numDataNodes, manageDfsDirs,  operation, racks, null, null);
+  }
+  
+  /**
+   * Modify the config and start up additional DataNodes.  The info port for
+   * DataNodes is guaranteed to use a free port.
+   *  
+   *  Data nodes can run with the name node in the mini cluster or
+   *  a real name node. For example, running with a real name node is useful
+   *  when running simulated data nodes with a real name node.
+   *  If minicluster's name node is null assume that the conf has been
+   *  set with the right address:port of the name node.
+   *
+   * @param conf the base configuration to use in starting the DataNodes.  This
+   *          will be modified as necessary.
+   * @param numDataNodes Number of DataNodes to start; may be zero
+   * @param manageDfsDirs if true, the data directories for DataNodes will be
+   *          created and dfs.data.dir will be set in the conf
+   * @param operation the operation with which to start the DataNodes.  If null
+   *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
+   * @param racks array of strings indicating the rack that each DataNode is on
+   * @param simulatedCapacities array of capacities of the simulated data nodes
+   *
+   * @throws IllegalStateException if NameNode has been shutdown
+   */
+  public void startDataNodes(Configuration conf, int numDataNodes, 
+                             boolean manageDfsDirs, StartupOption operation, 
+                             String[] racks,
+                             long[] simulatedCapacities) throws IOException {
+    startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, null,
+                   simulatedCapacities);
+    
+  }
+  /**
+   * If the NameNode is running, attempt to finalize a previous upgrade.
+   * When this method returns, the NameNode should be finalized, but
+   * DataNodes may not be since that occurs asynchronously.
+   *
+   * @throws IllegalStateException if the Namenode is not running.
+   */
+  public void finalizeCluster(Configuration conf) throws Exception {
+    if (nameNode == null) {
+      throw new IllegalStateException("Attempting to finalize "
+                                      + "Namenode but it is not running");
+    }
+    ToolRunner.run(new DFSAdmin(conf), new String[] {"-finalizeUpgrade"});
+  }
+  
+  /**
+   * Gets the started NameNode.  May be null.
+   */
+  public NameNode getNameNode() {
+    return nameNode;
+  }
+  
+  /**
+   * Return the {@link FSNamesystem} object.
+   * @return {@link FSNamesystem} object.
+   */
+  public FSNamesystem getNamesystem() {
+    return nameNode.getNamesystem();
+  }
+
+  /**
+   * Gets a list of the started DataNodes.  May be empty.
+   */
+  public ArrayList<DataNode> getDataNodes() {
+    ArrayList<DataNode> list = new ArrayList<DataNode>();
+    for (int i = 0; i < dataNodes.size(); i++) {
+      DataNode node = dataNodes.get(i).datanode;
+      list.add(node);
+    }
+    return list;
+  }
+  
+  /** @return the datanode having the ipc server listen port */
+  public DataNode getDataNode(int ipcPort) {
+    for(DataNode dn : getDataNodes()) {
+      if (dn.ipcServer.getListenerAddress().getPort() == ipcPort) {
+        return dn;
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Gets the rpc port used by the NameNode, because the caller 
+   * supplied port is not necessarily the actual port used.
+   */     
+  public int getNameNodePort() {
+    return nameNode.getNameNodeAddress().getPort();
+  }
+    
+  /**
+   * Shut down the servers that are up.
+   */
+  public void shutdown() {
+    System.out.println("Shutting down the Mini HDFS Cluster");
+    shutdownDataNodes();
+    if (nameNode != null) {
+      nameNode.stop();
+      nameNode.join();
+      nameNode = null;
+    }
+  }
+  
+  /**
+   * Shutdown all DataNodes started by this class.  The NameNode
+   * is left running so that new DataNodes may be started.
+   */
+  public void shutdownDataNodes() {
+    for (int i = dataNodes.size()-1; i >= 0; i--) {
+      System.out.println("Shutting down DataNode " + i);
+      DataNode dn = dataNodes.remove(i).datanode;
+      dn.shutdown();
+      numDataNodes--;
+    }
+  }
+
+  /*
+   * Corrupt a block on all datanodes
+   */
+  void corruptBlockOnDataNodes(String blockName) throws Exception{
+    for (int i=0; i < dataNodes.size(); i++)
+      corruptBlockOnDataNode(i,blockName);
+  }
+
+  /*
+   * Corrupt a block on a particular datanode
+   */
+  boolean corruptBlockOnDataNode(int i, String blockName) throws Exception {
+    Random random = new Random();
+    boolean corrupted = false;
+    File dataDir = new File(getBaseDirectory() + "data");
+    if (i < 0 || i >= dataNodes.size())
+      return false;
+    // Each datanode is given two data directories (data<2*i+1> and data<2*i+2>),
+    // so look for the block file in both of them.
+    for (int dn = i*2; dn < i*2+2; dn++) {
+      File blockFile = new File(dataDir, "data" + (dn+1) + "/current/" +
+                                blockName);
+      System.out.println("Corrupting for: " + blockFile);
+      if (blockFile.exists()) {
+        // Corrupt the replica by overwriting it at a random offset
+        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+        FileChannel channel = raFile.getChannel();
+        String badString = "BADBAD";
+        int rand = random.nextInt((int)channel.size()/2);
+        raFile.seek(rand);
+        raFile.write(badString.getBytes());
+        raFile.close();
+        corrupted = true;
+      }
+    }
+    return corrupted;
+  }
+
+  /*
+   * Shutdown a particular datanode
+   */
+  public DataNodeProperties stopDataNode(int i) {
+    if (i < 0 || i >= dataNodes.size()) {
+      return null;
+    }
+    DataNodeProperties dnprop = dataNodes.remove(i);
+    DataNode dn = dnprop.datanode;
+    System.out.println("MiniDFSCluster Stopping DataNode " + 
+                       dn.dnRegistration.getName() +
+                       " from a total of " + (dataNodes.size() + 1) + 
+                       " datanodes.");
+    dn.shutdown();
+    numDataNodes--;
+    return dnprop;
+  }
+
+  /**
+   * Restart a datanode
+   * @param dnprop the datanode's properties
+   * @return true if restarting is successful
+   * @throws IOException
+   */
+  public synchronized boolean restartDataNode(DataNodeProperties dnprop)
+  throws IOException {
+    Configuration conf = dnprop.conf;
+    String[] args = dnprop.dnArgs;
+    Configuration newconf = new Configuration(conf); // save cloned config
+    dataNodes.add(new DataNodeProperties(
+                     DataNode.createDataNode(args, conf), 
+                     newconf, args));
+    numDataNodes++;
+    return true;
+  }
+
+  /*
+   * Restart a particular datanode
+   */
+  public synchronized boolean restartDataNode(int i) throws IOException {
+    DataNodeProperties dnprop = stopDataNode(i);
+    if (dnprop == null) {
+      return false;
+    } else {
+      return restartDataNode(dnprop);
+    }
+  }
+
+  /*
+   * Shutdown a datanode by name.
+   */
+  public synchronized DataNodeProperties stopDataNode(String name) {
+    int i;
+    for (i = 0; i < dataNodes.size(); i++) {
+      DataNode dn = dataNodes.get(i).datanode;
+      if (dn.dnRegistration.getName().equals(name)) {
+        break;
+      }
+    }
+    return stopDataNode(i);
+  }
+  
+  /**
+   * Returns true if the NameNode is running and is out of Safe Mode.
+   */
+  public boolean isClusterUp() {
+    if (nameNode == null) {
+      return false;
+    }
+    // getStats()[0] is the total raw capacity, which becomes non-zero once
+    // at least one DataNode has registered and reported its storage.
+    long[] sizes = nameNode.getStats();
+    boolean isUp = false;
+    synchronized (this) {
+      isUp = (!nameNode.isInSafeMode() && sizes[0] != 0);
+    }
+    return isUp;
+  }
+  
+  /**
+   * Returns true if there is at least one DataNode running.
+   */
+  public boolean isDataNodeUp() {
+    if (dataNodes == null || dataNodes.size() == 0) {
+      return false;
+    }
+    return true;
+  }
+  
+  /**
+   * Get a client handle to the DFS cluster.
+   */
+  public FileSystem getFileSystem() throws IOException {
+    return FileSystem.get(conf);
+  }
+
+  /**
+   * Get the directories where the namenode stores its image.
+   */
+  public Collection<File> getNameDirs() {
+    return FSNamesystem.getNamespaceDirs(conf);
+  }
+
+  /**
+   * Get the directories where the namenode stores its edits.
+   */
+  public Collection<File> getNameEditsDirs() {
+    return FSNamesystem.getNamespaceEditsDirs(conf);
+  }
+
+  /**
+   * Wait until the cluster is active and running.
+   */
+  public void waitActive() throws IOException {
+    if (nameNode == null) {
+      return;
+    }
+    InetSocketAddress addr = new InetSocketAddress("localhost",
+                                                   getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+
+    // make sure all datanodes are alive
+    while(client.datanodeReport(DatanodeReportType.LIVE).length
+        != numDataNodes) {
+      try {
+        Thread.sleep(500);
+      } catch (Exception e) {
+      }
+    }
+
+    client.close();
+  }
+  
+  public void formatDataNodeDirs() throws IOException {
+    base_dir = new File(getBaseDirectory());
+    data_dir = new File(base_dir, "data");
+    if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
+      throw new IOException("Cannot remove data directory: " + data_dir);
+    }
+  }
+  
+  /**
+   * @param dataNodeIndex - data node whose block report is desired -
+   *          the index is the same as for getDataNodes()
+   * @return the block report for the specified data node
+   */
+  public Block[] getBlockReport(int dataNodeIndex) {
+    if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
+      throw new IndexOutOfBoundsException();
+    }
+    return dataNodes.get(dataNodeIndex).datanode.getFSDataset().getBlockReport();
+  }
+  
+  
+  /**
+   * 
+   * @return block reports from all data nodes
+   *    Block[] is indexed in the same order as the list of datanodes returned by getDataNodes()
+   */
+  public Block[][] getAllBlockReports() {
+    int numDataNodes = dataNodes.size();
+    Block[][] result = new Block[numDataNodes][];
+    for (int i = 0; i < numDataNodes; ++i) {
+      result[i] = getBlockReport(i);
+    }
+    return result;
+  }
+  
+  
+  /**
+   * This method is valid only if the data nodes have simulated data
+   * @param dataNodeIndex - data node in which to inject - the index is the same as for getDataNodes()
+   * @param blocksToInject - the blocks
+   * @throws IOException
+   *             if not simulatedFSDataset
+   *             if any of the blocks already exist in the data node
+   */
+  public void injectBlocks(int dataNodeIndex, Block[] blocksToInject) throws IOException {
+    if (dataNodeIndex < 0 || dataNodeIndex >= dataNodes.size()) {
+      throw new IndexOutOfBoundsException();
+    }
+    FSDatasetInterface dataSet = dataNodes.get(dataNodeIndex).datanode.getFSDataset();
+    if (!(dataSet instanceof SimulatedFSDataset)) {
+      throw new IOException("injectBlocks is valid only for SimilatedFSDataset");
+    }
+    SimulatedFSDataset sdataset = (SimulatedFSDataset) dataSet;
+    sdataset.injectBlocks(blocksToInject);
+    dataNodes.get(dataNodeIndex).datanode.scheduleBlockReport(0);
+  }
+  
+  /**
+   * This method is valid only if the data nodes have simulated data
+   * @param blocksToInject - blocksToInject[] is indexed in the same order as the list 
+   *             of datanodes returned by getDataNodes()
+   * @throws IOException
+   *             if not simulatedFSDataset
+   *             if any of the blocks already exist in the data nodes.
+   *             Note that the remaining blocks are not injected.
+   */
+  public void injectBlocks(Block[][] blocksToInject) throws IOException {
+    if (blocksToInject.length >  dataNodes.size()) {
+      throw new IndexOutOfBoundsException();
+    }
+    for (int i = 0; i < blocksToInject.length; ++i) {
+      injectBlocks(i, blocksToInject[i]);
+    }
+  }
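+
+  // Illustrative usage sketch (not part of this class): a test that starts
+  // two simulated-storage clusters can replay the blocks reported by one
+  // cluster into the other with the two methods above, e.g.
+  //   Block[][] reports = sourceCluster.getAllBlockReports();
+  //   targetCluster.injectBlocks(reports);
+  // The names sourceCluster/targetCluster are hypothetical; both clusters
+  // must be using SimulatedFSDataset for injectBlocks to succeed.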
+
+  /**
+   * Set the softLimit and hardLimit of client lease periods
+   */
+  void setLeasePeriod(long soft, long hard) {
+    final FSNamesystem namesystem = nameNode.getNamesystem();
+    namesystem.leaseManager.setLeasePeriod(soft, hard);
+    namesystem.lmthread.interrupt();
+  }
+
+  /**
+   * Returns the current set of datanodes
+   */
+  DataNode[] listDataNodes() {
+    DataNode[] list = new DataNode[dataNodes.size()];
+    for (int i = 0; i < dataNodes.size(); i++) {
+      list[i] = dataNodes.get(i).datanode;
+    }
+    return list;
+  }
+
+  /**
+   * Access to the data directory used for DataNodes.
+   */
+  public String getDataDirectory() {
+    return data_dir.getAbsolutePath();
+  }
+
+  public static String getBaseDirectory() {
+    return System.getProperty("test.build.data", "build/test/data") + "/dfs/";
+  }
+}
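
A minimal usage sketch of the harness defined above (illustrative only, not part of this patch): the class name MiniDFSClusterSmokeSketch and the path "/usage-sketch" are hypothetical, while the constructor, waitActive(), getFileSystem() and shutdown() calls are the MiniDFSCluster API shown in this file, and the imports are the standard Hadoop classes the tests below already use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterSmokeSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Format storage and start one in-process NameNode plus two DataNodes.
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    try {
      cluster.waitActive();                    // wait until both DataNodes have registered
      FileSystem fs = cluster.getFileSystem(); // client bound to the cluster's NameNode port
      FSDataOutputStream out = fs.create(new Path("/usage-sketch"));
      out.write(123);
      out.close();
    } finally {
      cluster.shutdown();                      // stops the DataNodes first, then the NameNode
    }
  }
}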

+ 70 - 0
src/test/org/apache/hadoop/hdfs/TestAbandonBlock.java

@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.util.StringUtils;
+
+public class TestAbandonBlock extends junit.framework.TestCase {
+  public static final Log LOG = LogFactory.getLog(TestAbandonBlock.class);
+  
+  private static final Configuration CONF = new Configuration();
+  static final String FILE_NAME_PREFIX
+      = "/" + TestAbandonBlock.class.getSimpleName() + "_"; 
+
+  public void testAbandonBlock() throws IOException {
+    MiniDFSCluster cluster = new MiniDFSCluster(CONF, 2, true, null);
+    FileSystem fs = cluster.getFileSystem();
+
+    String src = FILE_NAME_PREFIX + "foo";
+    FSDataOutputStream fout = null;
+    try {
+      //start writing a file but do not close it
+      fout = fs.create(new Path(src), true, 4096, (short)1, 512L);
+      for(int i = 0; i < 1024; i++) {
+        fout.write(123);
+      }
+      fout.sync();
+  
+      //have another client look up the block and try to abandon it
+      DFSClient dfsclient = new DFSClient(CONF);
+      LocatedBlocks blocks = dfsclient.namenode.getBlockLocations(src, 0, 1);
+      LocatedBlock b = blocks.get(0); 
+      try {
+        dfsclient.namenode.abandonBlock(b.getBlock(), src, "someone");
+        //the previous line should have thrown an exception
+        fail("abandonBlock() by a non-writer should have failed");
+      }
+      catch(IOException ioe) {
+        LOG.info("GREAT! " + StringUtils.stringifyException(ioe));
+      }
+    }
+    finally {
+      try{fout.close();} catch(Exception e) {}
+      try{fs.close();} catch(Exception e) {}
+      try{cluster.shutdown();} catch(Exception e) {}
+    }
+  }
+}

+ 64 - 0
src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSClient.DFSOutputStream;
+import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+import junit.framework.TestCase;
+
+/**
+ * This class tests DatanodeDescriptor.getBlocksScheduled() at the
+ * NameNode. This counter is supposed to keep track of blocks currently
+ * scheduled to a datanode.
+ */
+public class TestBlocksScheduledCounter extends TestCase {
+
+  public void testBlocksScheduledCounter() throws IOException {
+    
+    MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 1, 
+                                                true, null);
+    cluster.waitActive();
+    FileSystem fs = cluster.getFileSystem();
+    
+    //open a file and write a few bytes:
+    FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
+    for (int i=0; i<1024; i++) {
+      out.write(i);
+    }
+    // flush to make sure a block is allocated.
+    ((DFSOutputStream)(out.getWrappedStream())).sync();
+    
+    ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
+    cluster.getNamesystem().DFSNodesStatus(dnList, dnList);
+    DatanodeDescriptor dn = dnList.get(0);
+    
+    assertEquals(1, dn.getBlocksScheduled());
+   
+    // close the file and the counter should go to zero.
+    out.close();   
+    assertEquals(0, dn.getBlocksScheduled());
+  }
+}

+ 225 - 0
src/test/org/apache/hadoop/hdfs/TestCrcCorruption.java

@@ -0,0 +1,225 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+
+/**
+ * A JUnit test for corrupted file handling.
+ * This test creates a bunch of files/directories with a replication
+ * factor of 2, then verifies that a client can still automatically
+ * access the remaining valid replica in spite of the following
+ * types of simulated errors:
+ *
+ *  1. Deletes the meta file on one replica
+ *  2. Truncates the meta file on one replica
+ *  3. Corrupts the meta file header on one replica
+ *  4. Corrupts a random offset and portion of the meta file
+ *  5. Swaps two meta files, i.e. the formats of the meta files 
+ *     are valid but their CRCs do not match their corresponding 
+ *     data blocks
+ * The above tests are run for varied values of io.bytes.per.checksum 
+ * and dfs.block.size, including the case where the meta file spans 
+ * multiple blocks.
+ *
+ * Another portion of the test is commented out until HADOOP-1557 
+ * is addressed:
+ *  1. Create a file with 2 replicas, corrupt the meta file of one replica, 
+ *     and decrease the replication factor from 2 to 1. Validate that the 
+ *     remaining replica is the good one.
+ *  2. Create a file with 2 replicas, corrupt the meta file of one replica, 
+ *     and increase the replication factor of the file to 3. Verify that the 
+ *     new replica was created from the non-corrupted replica.
+ */
+public class TestCrcCorruption extends TestCase {
+  
+  public TestCrcCorruption(String testName) {
+    super(testName);
+  }
+
+  protected void setUp() throws Exception {
+  }
+
+  protected void tearDown() throws Exception {
+  }
+  
+  /** 
+   * check if DFS can handle corrupted CRC blocks
+   */
+  private void thistest(Configuration conf, DFSTestUtil util) throws Exception {
+    MiniDFSCluster cluster = null;
+    int numDataNodes = 2;
+    short replFactor = 2;
+    Random random = new Random();
+
+    try {
+      cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+      util.createFiles(fs, "/srcdat", replFactor);
+      util.waitReplication(fs, "/srcdat", (short)2);
+
+      // Now deliberately remove/truncate meta blocks from the first
+      // directory of the first datanode. The complete absence of a meta
+      // file prevents this DataNode from sending data to another datanode.
+      // However, a client is still allowed access to this block.
+      //
+      File data_dir = new File(System.getProperty("test.build.data"),
+                               "dfs/data/data1/current");
+      assertTrue("data directory does not exist", data_dir.exists());
+      File[] blocks = data_dir.listFiles();
+      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
+      int num = 0;
+      for (int idx = 0; idx < blocks.length; idx++) {
+        if (blocks[idx].getName().startsWith("blk_") &&
+            blocks[idx].getName().endsWith(".meta")) {
+          num++;
+          if (num % 3 == 0) {
+            //
+            // remove .meta file
+            //
+            System.out.println("Deliberately removing file " + blocks[idx].getName());
+            assertTrue("Cannot remove file.", blocks[idx].delete());
+          } else if (num % 3 == 1) {
+            //
+            // shorten .meta file
+            //
+            RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
+            FileChannel channel = file.getChannel();
+            int newsize = random.nextInt((int)channel.size()/2);
+            System.out.println("Deliberately truncating file " + 
+                               blocks[idx].getName() + 
+                               " to size " + newsize + " bytes.");
+            channel.truncate(newsize);
+            file.close();
+          } else {
+            //
+            // corrupt a few bytes of the metafile
+            //
+            RandomAccessFile file = new RandomAccessFile(blocks[idx], "rw");
+            FileChannel channel = file.getChannel();
+            long position = 0;
+            //
+            // The very first time, corrupt the meta header at offset 0
+            //
+            if (num != 2) {
+              position = (long)random.nextInt((int)channel.size());
+            }
+            int length = random.nextInt((int)(channel.size() - position + 1));
+            byte[] buffer = new byte[length];
+            random.nextBytes(buffer);
+            channel.write(ByteBuffer.wrap(buffer), position);
+            System.out.println("Deliberately corrupting file " + 
+                               blocks[idx].getName() + 
+                               " at offset " + position +
+                               " length " + length);
+            file.close();
+          }
+        }
+      }
+      //
+      // Now deliberately corrupt all meta blocks from the second
+      // directory of the first datanode
+      //
+      data_dir = new File(System.getProperty("test.build.data"),
+                               "dfs/data/data2/current");
+      assertTrue("data directory does not exist", data_dir.exists());
+      blocks = data_dir.listFiles();
+      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
+
+      int count = 0;
+      File previous = null;
+      for (int idx = 0; idx < blocks.length; idx++) {
+        if (blocks[idx].getName().startsWith("blk_") &&
+            blocks[idx].getName().endsWith(".meta")) {
+          //
+          // Move the previous metafile into the current one.
+          //
+          count++;
+          if (count % 2 == 0) {
+            System.out.println("Deliberately insertimg bad crc into files " +
+                                blocks[idx].getName() + " " + previous.getName());
+            assertTrue("Cannot remove file.", blocks[idx].delete());
+            assertTrue("Cannot corrupt meta file.", previous.renameTo(blocks[idx]));
+            assertTrue("Cannot recreate empty meta file.", previous.createNewFile());
+            previous = null;
+          } else {
+            previous = blocks[idx];
+          }
+        }
+      }
+
+      //
+      // Only one replica is possibly corrupted. The other replica should still
+      // be good. Verify.
+      //
+      assertTrue("Corrupted replicas not handled properly.",
+                 util.checkFiles(fs, "/srcdat"));
+      System.out.println("All File still have a valid replica");
+
+      //
+      // set replication factor back to 1. This causes only one replica of
+      // each block to remain in HDFS. The check is to make sure that 
+      // the corrupted replica generated above is the one that gets deleted.
+      // This test is currently disabled until HADOOP-1557 is solved.
+      //
+      util.setReplication(fs, "/srcdat", (short)1);
+      //util.waitReplication(fs, "/srcdat", (short)1);
+      //System.out.println("All Files done with removing replicas");
+      //assertTrue("Excess replicas deleted. Corrupted replicas found.",
+      //           util.checkFiles(fs, "/srcdat"));
+      System.out.println("The excess-corrupted-replica test is disabled " +
+                         " pending HADOOP-1557");
+
+      util.cleanup(fs, "/srcdat");
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+
+  public void testCrcCorruption() throws Exception {
+    //
+    // default parameters
+    //
+    System.out.println("TestCrcCorruption with default parameters");
+    Configuration conf1 = new Configuration();
+    conf1.setInt("dfs.blockreport.intervalMsec", 3 * 1000);
+    DFSTestUtil util1 = new DFSTestUtil("TestCrcCorruption", 40, 3, 8*1024);
+    thistest(conf1, util1);
+
+    //
+    // specific parameters
+    //
+    System.out.println("TestCrcCorruption with specific parameters");
+    Configuration conf2 = new Configuration();
+    conf2.setInt("io.bytes.per.checksum", 17);
+    conf2.setInt("dfs.block.size", 34);
+    DFSTestUtil util2 = new DFSTestUtil("TestCrcCorruption", 40, 3, 400);
+    thistest(conf2, util2);
+  }
+}

+ 100 - 0
src/test/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+
+import junit.framework.TestCase;
+
+
+/**
+ * These tests make sure that DFSClient retries fetching data from DFS
+ * properly in case of errors.
+ */
+public class TestDFSClientRetries extends TestCase {
+  
+  // writes 'len' bytes of data to out.
+  private static void writeData(OutputStream out, int len) throws IOException {
+    byte [] buf = new byte[4096*16];
+    while(len > 0) {
+      int toWrite = Math.min(len, buf.length);
+      out.write(buf, 0, toWrite);
+      len -= toWrite;
+    }
+  }
+  
+  /**
+   * This makes sure that when the DN closes the client's socket after the
+   * client had successfully connected earlier, the data can still be fetched.
+   */
+  public void testWriteTimeoutAtDataNode() throws IOException,
+                                                  InterruptedException { 
+    Configuration conf = new Configuration();
+    
+    final int writeTimeout = 100; //milliseconds.
+    // set a very short write timeout for the datanode, so that the test runs fast.
+    conf.setInt("dfs.datanode.socket.write.timeout", writeTimeout); 
+    // set a smaller block size
+    final int blockSize = 10*1024*1024;
+    conf.setInt("dfs.block.size", blockSize);
+    conf.setInt("dfs.client.max.block.acquire.failures", 1);
+    // set a small buffer size
+    final int bufferSize = 4096;
+    conf.setInt("io.file.buffer.size", bufferSize);
+
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    
+    try {
+      cluster.waitActive();
+      FileSystem fs = cluster.getFileSystem();
+    
+      Path filePath = new Path("/testWriteTimeoutAtDataNode");
+      OutputStream out = fs.create(filePath, true, bufferSize);
+    
+      // write a 2 block file.
+      writeData(out, 2*blockSize);
+      out.close();
+      
+      byte[] buf = new byte[1024*1024]; // enough to empty TCP buffers.
+      
+      InputStream in = fs.open(filePath, bufferSize);
+      
+      //first read a few bytes
+      IOUtils.readFully(in, buf, 0, bufferSize/2);
+      //now read a few more chunks of data, sleeping in between:
+      for(int i=0; i<10; i++) {
+        Thread.sleep(2*writeTimeout); // force write timeout at the datanode.
+        // read enough to empty out socket buffers.
+        IOUtils.readFully(in, buf, 0, buf.length); 
+      }
+      // successfully read with write timeout on datanodes.
+      in.close();
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  // more tests related to different failure cases can be added here.
+}

+ 127 - 0
src/test/org/apache/hadoop/hdfs/TestDFSFinalize.java

@@ -0,0 +1,127 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.IOException;
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+
+/**
+ * This test ensures the appropriate response from the system when 
+ * the system is finalized.
+ */
+public class TestDFSFinalize extends TestCase {
+ 
+  private static final Log LOG = LogFactory.getLog(
+                                                   "org.apache.hadoop.hdfs.TestDFSFinalize");
+  private Configuration conf;
+  private int testCounter = 0;
+  private MiniDFSCluster cluster = null;
+    
+  /**
+   * Writes an INFO log message containing the parameters.
+   */
+  void log(String label, int numDirs) {
+    LOG.info("============================================================");
+    LOG.info("***TEST " + (testCounter++) + "*** " 
+             + label + ":"
+             + " numDirs="+numDirs);
+  }
+  
+  /**
+   * Verify that the current directory exists and that the previous directory
+   * does not exist.  Verify that current hasn't been modified by comparing 
+   * the checksums of all the files it contains with their original checksums.
+   * Note that we do not check that previous is removed on the DataNode
+   * because its removal is asynchronous therefore we have no reliable
+   * way to know when it will happen.  
+   */
+  void checkResult(String[] nameNodeDirs, String[] dataNodeDirs) throws IOException {
+    for (int i = 0; i < nameNodeDirs.length; i++) {
+      assertTrue(new File(nameNodeDirs[i],"current").isDirectory());
+      assertTrue(new File(nameNodeDirs[i],"current/VERSION").isFile());
+      assertTrue(new File(nameNodeDirs[i],"current/edits").isFile());
+      assertTrue(new File(nameNodeDirs[i],"current/fsimage").isFile());
+      assertTrue(new File(nameNodeDirs[i],"current/fstime").isFile());
+    }
+    for (int i = 0; i < dataNodeDirs.length; i++) {
+      assertEquals(
+                   UpgradeUtilities.checksumContents(
+                                                     DATA_NODE, new File(dataNodeDirs[i],"current")),
+                   UpgradeUtilities.checksumMasterContents(DATA_NODE));
+    }
+    for (int i = 0; i < nameNodeDirs.length; i++) {
+      assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());
+    }
+  }
+ 
+  /**
+   * This test attempts to finalize the NameNode and DataNode.
+   */
+  public void testFinalize() throws Exception {
+    UpgradeUtilities.initialize();
+    
+    for (int numDirs = 1; numDirs <= 2; numDirs++) {
+      /* This test requires that "current" directory not change after
+       * the upgrade. Strictly speaking it is ok for those contents to change,
+       * but for now block verification is disabled so that the contents are
+       * not changed.
+       */
+      conf = new Configuration();
+      conf.setInt("dfs.datanode.scan.period.hours", -1);
+      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
+      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
+      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      
+      log("Finalize with existing previous dir", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
+      cluster = new MiniDFSCluster(conf, 1, StartupOption.REGULAR);
+      cluster.finalizeCluster(conf);
+      checkResult(nameNodeDirs, dataNodeDirs);
+
+      log("Finalize without existing previous dir", numDirs);
+      cluster.finalizeCluster(conf);
+      checkResult(nameNodeDirs, dataNodeDirs);
+
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+    } // end numDir loop
+  }
+ 
+  protected void tearDown() throws Exception {
+    LOG.info("Shutting down MiniDFSCluster");
+    if (cluster != null) cluster.shutdown();
+  }
+  
+  public static void main(String[] args) throws Exception {
+    new TestDFSFinalize().testFinalize();
+  }
+  
+}
+
+

+ 76 - 0
src/test/org/apache/hadoop/hdfs/TestDFSMkdirs.java

@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+
+/**
+ * This class tests that the DFS command mkdirs cannot create subdirectories
+ * from a file when passed an illegal path.  HADOOP-281.
+ */
+public class TestDFSMkdirs extends TestCase {
+
+  private void writeFile(FileSystem fileSys, Path name) throws IOException {
+    DataOutputStream stm = fileSys.create(name);
+    stm.writeBytes("wchien");
+    stm.close();
+  }
+  
+  /**
+   * Tests mkdirs can create a directory that does not exist and will
+   * not create a subdirectory off a file.
+   */
+  public void testDFSMkdirs() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    FileSystem fileSys = cluster.getFileSystem();
+    try {
+      // First create a new directory with mkdirs
+      Path myPath = new Path("/test/mkdirs");
+      assertTrue(fileSys.mkdirs(myPath));
+      assertTrue(fileSys.exists(myPath));
+      assertTrue(fileSys.mkdirs(myPath));
+
+      // Second, create a file in that directory.
+      Path myFile = new Path("/test/mkdirs/myFile");
+      writeFile(fileSys, myFile);
+   
+      // Third, use mkdir to create a subdirectory off of that file,
+      // and check that it fails.
+      Path myIllegalPath = new Path("/test/mkdirs/myFile/subdir");
+      boolean exist = true;
+      try {
+        fileSys.mkdirs(myIllegalPath);
+      } catch (IOException e) {
+        exist = false;
+      }
+      assertFalse(exist);
+      assertFalse(fileSys.exists(myIllegalPath));
+      fileSys.delete(myFile, true);
+    	
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+}

+ 992 - 0
src/test/org/apache/hadoop/hdfs/TestDFSPermission.java

@@ -0,0 +1,992 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.Random;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.commons.logging.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.Util;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.permission.*;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+
+import junit.framework.AssertionFailedError;
+import junit.framework.TestCase;
+
+/** Unit tests for permissions */
+public class TestDFSPermission extends TestCase {
+  public static final Log LOG = LogFactory.getLog(TestDFSPermission.class);
+  final private static Configuration conf = new Configuration();
+  
+  final private static String GROUP1_NAME = "group1";
+  final private static String GROUP2_NAME = "group2";
+  final private static String GROUP3_NAME = "group3";
+  final private static String GROUP4_NAME = "group4";
+  final private static String USER1_NAME = "user1";
+  final private static String USER2_NAME = "user2";
+  final private static String USER3_NAME = "user3";
+
+  private static UnixUserGroupInformation SUPERUSER;
+  private static UnixUserGroupInformation USER1;
+  private static UnixUserGroupInformation USER2;
+  private static UnixUserGroupInformation USER3;
+  
+  final private static short MAX_PERMISSION = 511;
+  final private static short DEFAULT_UMASK = 022;
+  final private static short FILE_MASK = 0666;
+  final private static FsPermission DEFAULT_PERMISSION = 
+    FsPermission.createImmutable((short) 0777);
+  final static private int NUM_TEST_PERMISSIONS = 
+    conf.getInt("test.dfs.permission.num", 10) * (MAX_PERMISSION + 1) / 100;
+
+
+  final private static String PATH_NAME = "xx";
+  final private static Path FILE_DIR_PATH = new Path("/", PATH_NAME);
+  final private static Path NON_EXISTENT_PATH = new Path("/parent", PATH_NAME);
+  final private static Path NON_EXISTENT_FILE = new Path("/NonExistentFile");
+
+  private FileSystem fs;
+  private static Random r;
+
+  static {
+    try {
+      // Initialize the random number generator and log the seed
+      long seed = Util.now();
+      r = new Random(seed);
+      LOG.info("Random number generator uses seed " + seed);
+      LOG.info("NUM_TEST_PERMISSIONS=" + NUM_TEST_PERMISSIONS);
+      
+      // explicitly turn on permission checking
+      conf.setBoolean("dfs.permissions", true);
+      
+      // Initialize all four users
+      SUPERUSER = UnixUserGroupInformation.login(conf);
+      USER1 = new UnixUserGroupInformation(USER1_NAME, new String[] {
+          GROUP1_NAME, GROUP2_NAME });
+      USER2 = new UnixUserGroupInformation(USER2_NAME, new String[] {
+          GROUP2_NAME, GROUP3_NAME });
+      USER3 = new UnixUserGroupInformation(USER3_NAME, new String[] {
+          GROUP3_NAME, GROUP4_NAME });
+    } catch (LoginException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /** This tests if permission setting in create, mkdir, and 
+   * setPermission works correctly
+   */
+  public void testPermissionSetting() throws Exception {
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    try {
+      cluster.waitActive();
+      fs = FileSystem.get(conf);
+      LOG.info("ROOT=" + fs.getFileStatus(new Path("/")));
+      testPermissionSetting(OpType.CREATE); // test file creation
+      testPermissionSetting(OpType.MKDIRS); // test directory creation
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  /* check permission setting works correctly for file or directory */
+  private void testPermissionSetting(OpType op) throws Exception {
+    // case 1: use default permission but all possible umasks
+    PermissionGenerator generator = new PermissionGenerator(r);
+    for (short i = 0; i < NUM_TEST_PERMISSIONS; i++) {
+      createAndCheckPermission(op, FILE_DIR_PATH, generator.next(),
+          new FsPermission(DEFAULT_PERMISSION), true);
+    }
+
+    // case 2: use permission 0643 and the default umask
+    createAndCheckPermission(op, FILE_DIR_PATH, DEFAULT_UMASK,
+        new FsPermission((short) 0643), true);
+
+    // case 3: use permission 0643 and umask 0222
+    createAndCheckPermission(op, FILE_DIR_PATH, (short) 0222, 
+        new FsPermission((short) 0643), false);
+
+    // case 4: set permission
+    fs.setPermission(FILE_DIR_PATH, new FsPermission((short) 0111));
+    short expectedPermission = (short) ((op == OpType.CREATE) ? 0 : 0111);
+    checkPermission(FILE_DIR_PATH, expectedPermission, true);
+
+    // case 5: test non-existent parent directory
+    assertFalse(fs.exists(NON_EXISTENT_PATH));
+    createAndCheckPermission(op, NON_EXISTENT_PATH, DEFAULT_UMASK,
+        new FsPermission(DEFAULT_PERMISSION), false);
+    Path parent = NON_EXISTENT_PATH.getParent();
+    checkPermission(parent, getPermission(parent.getParent()), true);
+  }
+
+  /* get the permission of a file/directory */
+  private short getPermission(Path path) throws IOException {
+    return fs.getFileStatus(path).getPermission().toShort();
+  }
+
+  /* create a file/directory with the default umask and permission */
+  private void create(OpType op, Path name) throws IOException {
+    create(op, name, DEFAULT_UMASK, new FsPermission(DEFAULT_PERMISSION));
+  }
+
+  /* create a file/directory with the given umask and permission */
+  private void create(OpType op, Path name, short umask, 
+      FsPermission permission) throws IOException {
+    // set umask in configuration
+    conf.setInt(FsPermission.UMASK_LABEL, umask);
+
+    // create the file/directory
+    switch (op) {
+    case CREATE:
+      FSDataOutputStream out = fs.create(name, permission, true, conf.getInt(
+          "io.file.buffer.size", 4096), fs.getDefaultReplication(), fs
+          .getDefaultBlockSize(), null);
+      out.close();
+      break;
+    case MKDIRS:
+      fs.mkdirs(name, permission);
+      break;
+    default:
+      throw new IOException("Unsupported operation: " + op);
+    }
+  }
+
+  /* create file/directory with the provided umask and permission; then it
+   * checks if the permission is set correctly;
+   * If the delete flag is true, delete the file afterwards; otherwise leave
+   * it in the file system.
+   */
+  private void createAndCheckPermission(OpType op, Path name, short umask,
+      FsPermission permission, boolean delete) throws Exception {
+    // create the file/directory
+    create(op, name, umask, permission);
+
+    // get the short form of the permission
+    short permissionNum = (DEFAULT_PERMISSION.equals(permission)) ? MAX_PERMISSION
+        : permission.toShort();
+
+    // get the expected permission
+    short expectedPermission = (op == OpType.CREATE) ? (short) (~umask
+        & permissionNum & FILE_MASK) : (short) (~umask & permissionNum);
+
+    // check if permission is correctly set
+    checkPermission(name, expectedPermission, delete);
+  }
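+
+  // Worked example of the formula above (illustrative only): with permission
+  // 0643 and umask 0222, ~umask & permission = 0555 & 0643 = 0441, so MKDIRS
+  // is expected to produce 0441 while CREATE, after masking with
+  // FILE_MASK = 0666, is expected to produce 0440.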
+
+  /* Check if the permission of a file/directory is the same as the
+   * expected permission; If the delete flag is true, delete the
+   * file/directory afterwards.
+   */
+  private void checkPermission(Path name, short expectedPermission,
+      boolean delete) throws IOException {
+    try {
+      // check its permission
+      assertEquals(getPermission(name), expectedPermission);
+    } finally {
+      // delete the file
+      if (delete) {
+        fs.delete(name, true);
+      }
+    }
+  }
+
+  /* check if the ownership of a file/directory is set correctly */
+  public void testOwnership() throws Exception {
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    try {
+      cluster.waitActive();
+      testOwnership(OpType.CREATE); // test file creation
+      testOwnership(OpType.MKDIRS); // test directory creation
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  /* change a file/directory's owner and group.
+   * if expectDeny is set, expect an AccessControlException.
+   */
+  private void setOwner(Path path, String owner, String group,
+      boolean expectDeny) throws IOException {
+    try {
+      String expectedOwner = (owner == null) ? getOwner(path) : owner;
+      String expectedGroup = (group == null) ? getGroup(path) : group;
+      fs.setOwner(path, owner, group);
+      checkOwnership(path, expectedOwner, expectedGroup);
+      assertFalse(expectDeny);
+    } catch(AccessControlException e) {
+      assertTrue(expectDeny);
+    }
+  }
+
+  /* check ownership is set correctly for a file or directory */
+  private void testOwnership(OpType op) throws Exception {
+    // case 1: superuser create a file/directory
+    fs = FileSystem.get(conf);
+    create(op, FILE_DIR_PATH, DEFAULT_UMASK,
+        new FsPermission(DEFAULT_PERMISSION));
+    checkOwnership(FILE_DIR_PATH, SUPERUSER.getUserName(),
+        getGroup(FILE_DIR_PATH.getParent()));
+
+    // case 2: superuser changes FILE_DIR_PATH's owner to be <user1, group3>
+    setOwner(FILE_DIR_PATH, USER1.getUserName(), GROUP3_NAME, false);
+
+    // case 3: user1 changes FILE_DIR_PATH's owner to be user2
+    login(USER1);
+    setOwner(FILE_DIR_PATH, USER2.getUserName(), null, true);
+
+    // case 4: user1 changes FILE_DIR_PATH's group to be group1 which it belongs
+    // to
+    setOwner(FILE_DIR_PATH, null, GROUP1_NAME, false);
+
+    // case 5: user1 changes FILE_DIR_PATH's group to be group3
+    // which it does not belong to
+    setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true);
+
+    // case 6: user2 (non-owner) changes FILE_DIR_PATH's group to be group3
+    login(USER2);
+    setOwner(FILE_DIR_PATH, null, GROUP3_NAME, true);
+
+    // case 7: user2 (non-owner) changes FILE_DIR_PATH's user to be user2
+    setOwner(FILE_DIR_PATH, USER2.getUserName(), null, true);
+
+    // delete the file/directory
+    login(SUPERUSER);
+    fs.delete(FILE_DIR_PATH, true);
+  }
+
+  /* Return the group owner of the file/directory */
+  private String getGroup(Path path) throws IOException {
+    return fs.getFileStatus(path).getGroup();
+  }
+
+  /* Return the file owner of the file/directory */
+  private String getOwner(Path path) throws IOException {
+    return fs.getFileStatus(path).getOwner();
+  }
+
+  /* check if ownership is set correctly */
+  private void checkOwnership(Path name, String expectedOwner,
+      String expectedGroup) throws IOException {
+    // check its owner and group
+    FileStatus status = fs.getFileStatus(name);
+    assertEquals(status.getOwner(), expectedOwner);
+    assertEquals(status.getGroup(), expectedGroup);
+  }
+
+  final static private String ANCESTOR_NAME = "/ancestor";
+  final static private String PARENT_NAME = "parent";
+  final static private String FILE_NAME = "file";
+  final static private String DIR_NAME = "dir";
+  final static private String FILE_DIR_NAME = "filedir";
+
+  private enum OpType {CREATE, MKDIRS, OPEN, SET_REPLICATION,
+    GET_FILEINFO, IS_DIR, EXISTS, GET_CONTENT_LENGTH, LIST, RENAME, DELETE
+  };
+
+  /* Check if namenode performs permission checking correctly for
+   * superuser, file owner, group owner, and other users */
+  public void testPermissionChecking() throws Exception {
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    try {
+      cluster.waitActive();
+      fs = FileSystem.get(conf);
+
+      // set the permission of the root to be world-wide rwx
+      fs.setPermission(new Path("/"), new FsPermission((short)0777));
+      
+      // create a directory hierarchy and set random permissions for each inode
+      PermissionGenerator ancestorPermissionGenerator = 
+        new PermissionGenerator(r);
+      PermissionGenerator dirPermissionGenerator = new PermissionGenerator(r);
+      PermissionGenerator filePermissionGenerator = new PermissionGenerator(r);
+      short[] ancestorPermissions = new short[NUM_TEST_PERMISSIONS];
+      short[] parentPermissions = new short[NUM_TEST_PERMISSIONS];
+      short[] permissions = new short[NUM_TEST_PERMISSIONS];
+      Path[] ancestorPaths = new Path[NUM_TEST_PERMISSIONS];
+      Path[] parentPaths = new Path[NUM_TEST_PERMISSIONS];
+      Path[] filePaths = new Path[NUM_TEST_PERMISSIONS];
+      Path[] dirPaths = new Path[NUM_TEST_PERMISSIONS];
+      for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
+        // create ancestor directory
+        ancestorPaths[i] = new Path(ANCESTOR_NAME + i);
+        create(OpType.MKDIRS, ancestorPaths[i]);
+        fs.setOwner(ancestorPaths[i], USER1_NAME, GROUP2_NAME);
+        // create parent directory
+        parentPaths[i] = new Path(ancestorPaths[i], PARENT_NAME + i);
+        create(OpType.MKDIRS, parentPaths[i]);
+        // change parent directory's ownership to be user1
+        fs.setOwner(parentPaths[i], USER1_NAME, GROUP2_NAME);
+
+        filePaths[i] = new Path(parentPaths[i], FILE_NAME + i);
+        dirPaths[i] = new Path(parentPaths[i], DIR_NAME + i);
+
+        // makes sure that each inode at the same level 
+        // has a different permission
+        ancestorPermissions[i] = ancestorPermissionGenerator.next();
+        parentPermissions[i] = dirPermissionGenerator.next();
+        permissions[i] = filePermissionGenerator.next();
+        fs.setPermission(ancestorPaths[i], new FsPermission(
+            ancestorPermissions[i]));
+        fs.setPermission(parentPaths[i], new FsPermission(
+                parentPermissions[i]));
+      }
+
+      /* file owner */
+      testPermissionCheckingPerUser(USER1, ancestorPermissions,
+          parentPermissions, permissions, parentPaths, filePaths, dirPaths);
+      /* group owner */
+      testPermissionCheckingPerUser(USER2, ancestorPermissions,
+          parentPermissions, permissions, parentPaths, filePaths, dirPaths);
+      /* other owner */
+      testPermissionCheckingPerUser(USER3, ancestorPermissions,
+          parentPermissions, permissions, parentPaths, filePaths, dirPaths);
+      /* super owner */
+      testPermissionCheckingPerUser(SUPERUSER, ancestorPermissions,
+          parentPermissions, permissions, parentPaths, filePaths, dirPaths);
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  /* Check if namenode performs permission checking correctly 
+   * for the given user for operations mkdir, open, setReplication, 
+   * getFileInfo, isDirectory, exists, getContentLength, list, rename,
+   * and delete */
+  private void testPermissionCheckingPerUser(UnixUserGroupInformation ugi,
+      short[] ancestorPermission, short[] parentPermission,
+      short[] filePermission, Path[] parentDirs, Path[] files, Path[] dirs)
+      throws Exception {
+    login(SUPERUSER);
+    for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
+      create(OpType.CREATE, files[i]);
+      create(OpType.MKDIRS, dirs[i]);
+      fs.setOwner(files[i], USER1_NAME, GROUP2_NAME);
+      fs.setOwner(dirs[i], USER1_NAME, GROUP2_NAME);
+      checkOwnership(dirs[i], USER1_NAME, GROUP2_NAME);
+      checkOwnership(files[i], USER1_NAME, GROUP2_NAME);
+
+      FsPermission fsPermission = new FsPermission(filePermission[i]);
+      fs.setPermission(files[i], fsPermission);
+      fs.setPermission(dirs[i], fsPermission);
+    }
+
+    login(ugi);
+    for (int i = 0; i < NUM_TEST_PERMISSIONS; i++) {
+      testCreateMkdirs(ugi, new Path(parentDirs[i], FILE_DIR_NAME),
+          ancestorPermission[i], parentPermission[i]);
+      testOpen(ugi, files[i], ancestorPermission[i], parentPermission[i],
+          filePermission[i]);
+      testSetReplication(ugi, files[i], ancestorPermission[i],
+          parentPermission[i], filePermission[i]);
+      testSetTimes(ugi, files[i], ancestorPermission[i],
+          parentPermission[i], filePermission[i]);
+      testStats(ugi, files[i], ancestorPermission[i], parentPermission[i]);
+      testList(ugi, files[i], dirs[i], ancestorPermission[i],
+          parentPermission[i], filePermission[i]);
+      int next = i == NUM_TEST_PERMISSIONS - 1 ? 0 : i + 1;
+      testRename(ugi, files[i], files[next], ancestorPermission[i],
+          parentPermission[i], ancestorPermission[next], parentPermission[next]);
+      testDeleteFile(ugi, files[i], ancestorPermission[i], parentPermission[i]);
+      testDeleteDir(ugi, dirs[i], ancestorPermission[i], parentPermission[i],
+          filePermission[i], null);
+    }
+    
+    // test non existent file
+    checkNonExistentFile();
+  }
+
+  /* A random permission generator that guarantees that each permission
+   * value is generated only once.
+   */
+  static private class PermissionGenerator {
+    private Random r;
+    private short permissions[] = new short[MAX_PERMISSION + 1];
+    private int numLeft = MAX_PERMISSION + 1;
+
+    PermissionGenerator(Random r) {
+      this.r = r;
+      for (int i = 0; i <= MAX_PERMISSION; i++) {
+        permissions[i] = (short) i;
+      }
+    }
+
+    short next() throws IOException {
+      if (numLeft == 0) {
+        throw new IOException("No more permission is avaialbe");
+      }
+      int index = r.nextInt(numLeft); // choose which permission to return
+      numLeft--; // decrement the counter
+
+      // swap the chosen permission with last available permission in the array
+      short temp = permissions[numLeft];
+      permissions[numLeft] = permissions[index];
+      permissions[index] = temp;
+
+      return permissions[numLeft];
+    }
+  }
+
+  /* A base class that verifies the permission checking is correct 
+   * for an operation */
+  abstract class PermissionVerifier {
+    protected Path path;
+    protected short ancestorPermission;
+    protected short parentPermission;
+    private short permission;
+    protected short requiredAncestorPermission;
+    protected short requiredParentPermission;
+    protected short requiredPermission;
+    final static protected short opAncestorPermission = SEARCH_MASK;
+    protected short opParentPermission;
+    protected short opPermission;
+    protected UnixUserGroupInformation ugi;
+
+    /* initialize */
+    protected void set(Path path, short ancestorPermission,
+        short parentPermission, short permission) {
+      this.path = path;
+      this.ancestorPermission = ancestorPermission;
+      this.parentPermission = parentPermission;
+      this.permission = permission;
+      setOpPermission();
+      this.ugi = null;
+    }
+
+    /* Perform an operation and verify if the permission checking is correct */
+    void verifyPermission(UnixUserGroupInformation ugi) throws LoginException,
+        IOException {
+      if (this.ugi != ugi) {
+        setRequiredPermissions(ugi);
+        this.ugi = ugi;
+      }
+
+      try {
+        try {
+          call();
+          assertFalse(expectPermissionDeny());
+        } catch(AccessControlException e) {
+          assertTrue(expectPermissionDeny());
+        }
+      } catch (AssertionFailedError ae) {
+        logPermissions();
+        throw ae;
+      }
+    }
+
+    /** Log the permissions and required permissions */
+    protected void logPermissions() {
+      LOG.info("required ancestor permission:"
+          + Integer.toOctalString(requiredAncestorPermission));
+      LOG.info("ancestor permission: "
+          + Integer.toOctalString(ancestorPermission));
+      LOG.info("required parent permission:"
+          + Integer.toOctalString(requiredParentPermission));
+      LOG.info("parent permission: " + Integer.toOctalString(parentPermission));
+      LOG.info("required permission:"
+          + Integer.toOctalString(requiredPermission));
+      LOG.info("permission: " + Integer.toOctalString(permission));
+    }
+
+    /* Return true if an AccessControlException is expected */
+    protected boolean expectPermissionDeny() {
+      return (requiredPermission & permission) != requiredPermission
+          || (requiredParentPermission & parentPermission) !=
+                            requiredParentPermission
+          || (requiredAncestorPermission & ancestorPermission) !=
+                            requiredAncestorPermission;
+    }
+
+    /* Set the permissions required to pass the permission checking */
+    protected void setRequiredPermissions(UnixUserGroupInformation ugi)
+        throws IOException {
+      if (SUPERUSER.equals(ugi)) {
+        requiredAncestorPermission = SUPER_MASK;
+        requiredParentPermission = SUPER_MASK;
+        requiredPermission = SUPER_MASK;
+      } else if (USER1.equals(ugi)) {
+        requiredAncestorPermission = (short)(opAncestorPermission & OWNER_MASK);
+        requiredParentPermission = (short)(opParentPermission & OWNER_MASK);
+        requiredPermission = (short)(opPermission & OWNER_MASK);
+      } else if (USER2.equals(ugi)) {
+        requiredAncestorPermission = (short)(opAncestorPermission & GROUP_MASK);
+        requiredParentPermission = (short)(opParentPermission & GROUP_MASK);
+        requiredPermission = (short)(opPermission & GROUP_MASK);
+      } else if (USER3.equals(ugi)) {
+        requiredAncestorPermission = (short)(opAncestorPermission & OTHER_MASK);
+        requiredParentPermission = (short)(opParentPermission & OTHER_MASK);
+        requiredPermission = (short)(opPermission & OTHER_MASK);
+      } else {
+        throw new IllegalArgumentException("Non-supported user: " + ugi);
+      }
+    }
+
+    /* Set the rwx permissions required for the operation */
+    abstract void setOpPermission();
+
+    /* Perform the operation */
+    abstract void call() throws IOException;
+  }
+
+  final static private short SUPER_MASK = 0;
+  final static private short READ_MASK = 0444;
+  final static private short WRITE_MASK = 0222;
+  final static private short SEARCH_MASK = 0111;
+  final static private short NULL_MASK = 0;
+  final static private short OWNER_MASK = 0700;
+  final static private short GROUP_MASK = 0070;
+  final static private short OTHER_MASK = 0007;
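+  // How these masks combine (illustrative): READ/WRITE/SEARCH select one kind of
+  // access across all user classes (e.g. READ_MASK = 0444), while OWNER/GROUP/OTHER
+  // select all access bits for one class (e.g. OWNER_MASK = 0700).
+  // setRequiredPermissions() intersects the two, e.g.
+  // (SEARCH_MASK & OWNER_MASK) == 0100, i.e. owner search/execute only.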
+
+  /* A class that verifies the permission checking is correct for create/mkdir */
+  private class CreatePermissionVerifier extends PermissionVerifier {
+    private OpType opType;
+    private boolean cleanup = true;
+
+    /* initialize */
+    protected void set(Path path, OpType opType, short ancestorPermission,
+        short parentPermission) {
+      super.set(path, ancestorPermission, parentPermission, NULL_MASK);
+      setOpType(opType);
+    }
+
+    void setCleanup(boolean cleanup) {
+      this.cleanup = cleanup;
+    }
+    
+    /* set whether the operation is mkdir or create */
+    void setOpType(OpType opType) {
+      this.opType = opType;
+    }
+
+    @Override
+    void setOpPermission() {
+      this.opParentPermission = SEARCH_MASK | WRITE_MASK;
+    }
+
+    @Override
+    void call() throws IOException {
+      create(opType, path);
+      if (cleanup) {
+        fs.delete(path, true);
+      }
+    }
+  }
+
+  private CreatePermissionVerifier createVerifier =
+    new CreatePermissionVerifier();
+  /* test if the permission checking of create/mkdir is correct */
+  private void testCreateMkdirs(UnixUserGroupInformation ugi, Path path,
+      short ancestorPermission, short parentPermission) throws Exception {
+    createVerifier.set(path, OpType.MKDIRS, ancestorPermission,
+        parentPermission);
+    createVerifier.verifyPermission(ugi);
+    createVerifier.setOpType(OpType.CREATE);
+    createVerifier.setCleanup(false);
+    createVerifier.verifyPermission(ugi);
+    createVerifier.setCleanup(true);
+    createVerifier.verifyPermission(ugi); // test overwriting the existing file
+  }
+
+  /* A class that verifies the permission checking is correct for open */
+  private class OpenPermissionVerifier extends PermissionVerifier {
+    @Override
+    void setOpPermission() {
+      this.opParentPermission = SEARCH_MASK;
+      this.opPermission = READ_MASK;
+    }
+
+    @Override
+    void call() throws IOException {
+      FSDataInputStream in = fs.open(path);
+      in.close();
+    }
+  }
+
+  private OpenPermissionVerifier openVerifier = new OpenPermissionVerifier();
+  /* test if the permission checking of open is correct */
+  private void testOpen(UnixUserGroupInformation ugi, Path path,
+      short ancestorPermission, short parentPermission, short filePermission)
+      throws Exception {
+    openVerifier
+        .set(path, ancestorPermission, parentPermission, filePermission);
+    openVerifier.verifyPermission(ugi);
+  }
+
+  /* A class that verifies the permission checking is correct for 
+   * setReplication */
+  private class SetReplicationPermissionVerifier extends PermissionVerifier {
+    @Override
+    void setOpPermission() {
+      this.opParentPermission = SEARCH_MASK;
+      this.opPermission = WRITE_MASK;
+    }
+
+    @Override
+    void call() throws IOException {
+      fs.setReplication(path, (short) 1);
+    }
+  }
+
+  private SetReplicationPermissionVerifier replicatorVerifier =
+    new SetReplicationPermissionVerifier();
+  /* test if the permission checking of setReplication is correct */
+  private void testSetReplication(UnixUserGroupInformation ugi, Path path,
+      short ancestorPermission, short parentPermission, short filePermission)
+      throws Exception {
+    replicatorVerifier.set(path, ancestorPermission, parentPermission,
+        filePermission);
+    replicatorVerifier.verifyPermission(ugi);
+  }
+
+  /* A class that verifies the permission checking is correct for 
+   * setTimes */
+  private class SetTimesPermissionVerifier extends PermissionVerifier {
+    @Override
+    void setOpPermission() {
+      this.opParentPermission = SEARCH_MASK;
+      this.opPermission = WRITE_MASK;
+    }
+
+    @Override
+    void call() throws IOException {
+      fs.setTimes(path, 100, 100);
+      fs.setTimes(path, -1, 100);
+      fs.setTimes(path, 100, -1);
+    }
+  }
+
+  private SetTimesPermissionVerifier timesVerifier =
+    new SetTimesPermissionVerifier();
+  /* test if the permission checking of setTimes is correct */
+  private void testSetTimes(UnixUserGroupInformation ugi, Path path,
+      short ancestorPermission, short parentPermission, short filePermission)
+      throws Exception {
+    timesVerifier.set(path, ancestorPermission, parentPermission,
+        filePermission);
+    timesVerifier.verifyPermission(ugi);
+  }
+
+  /* A class that verifies the permission checking is correct for isDirectory,
+   * exists, getFileInfo, and getContentSummary */
+  private class StatsPermissionVerifier extends PermissionVerifier {
+    OpType opType;
+
+    /* initialize */
+    void set(Path path, OpType opType, short ancestorPermission,
+        short parentPermission) {
+      super.set(path, ancestorPermission, parentPermission, NULL_MASK);
+      setOpType(opType);
+    }
+
+    /* set the operation: getFileInfo, isDirectory, exists, or getContentSummary */
+    void setOpType(OpType opType) {
+      this.opType = opType;
+    }
+
+    @Override
+    void setOpPermission() {
+      this.opParentPermission = SEARCH_MASK;
+    }
+
+    @Override
+    void call() throws IOException {
+      switch (opType) {
+      case GET_FILEINFO:
+        fs.getFileStatus(path);
+        break;
+      case IS_DIR:
+        fs.isDirectory(path);
+        break;
+      case EXISTS:
+        fs.exists(path);
+        break;
+      case GET_CONTENT_LENGTH:
+        fs.getContentSummary(path).getLength();
+        break;
+      default:
+        throw new IllegalArgumentException("Unexpected operation type: "
+            + opType);
+      }
+    }
+  }
+
+  private StatsPermissionVerifier statsVerifier = new StatsPermissionVerifier();
+  /* test if the permission checking of isDirectory, exists,
+   * getFileInfo, and getContentSummary is correct */
+  private void testStats(UnixUserGroupInformation ugi, Path path,
+      short ancestorPermission, short parentPermission) throws Exception {
+    statsVerifier.set(path, OpType.GET_FILEINFO, ancestorPermission,
+        parentPermission);
+    statsVerifier.verifyPermission(ugi);
+    statsVerifier.setOpType(OpType.IS_DIR);
+    statsVerifier.verifyPermission(ugi);
+    statsVerifier.setOpType(OpType.EXISTS);
+    statsVerifier.verifyPermission(ugi);
+    statsVerifier.setOpType(OpType.GET_CONTENT_LENGTH);
+    statsVerifier.verifyPermission(ugi);
+  }
+
+  private enum InodeType {
+    FILE, DIR
+  };
+
+  /* A class that verifies the permission checking is correct for list */
+  private class ListPermissionVerifier extends PermissionVerifier {
+    private InodeType inodeType;
+
+    /* initialize */
+    void set(Path path, InodeType inodeType, short ancestorPermission,
+        short parentPermission, short permission) {
+      this.inodeType = inodeType;
+      super.set(path, ancestorPermission, parentPermission, permission);
+    }
+
+    /* set whether the given path is a file or a directory */
+    void setInodeType(Path path, InodeType inodeType) {
+      this.path = path;
+      this.inodeType = inodeType;
+      setOpPermission();
+      this.ugi = null;
+    }
+
+    @Override
+    void setOpPermission() {
+      this.opParentPermission = SEARCH_MASK;
+      switch (inodeType) {
+      case FILE:
+        this.opPermission = 0;
+        break;
+      case DIR:
+        this.opPermission = READ_MASK | SEARCH_MASK;
+        break;
+      default:
+        throw new IllegalArgumentException("Illegal inode type: " + inodeType);
+      }
+    }
+
+    @Override
+    void call() throws IOException {
+      fs.listStatus(path);
+    }
+  }
+
+  ListPermissionVerifier listVerifier = new ListPermissionVerifier();
+  /* test if the permission checking of list is correct */
+  private void testList(UnixUserGroupInformation ugi, Path file, Path dir,
+      short ancestorPermission, short parentPermission, short filePermission)
+      throws Exception {
+    listVerifier.set(file, InodeType.FILE, ancestorPermission,
+        parentPermission, filePermission);
+    listVerifier.verifyPermission(ugi);
+    listVerifier.setInodeType(dir, InodeType.DIR);
+    listVerifier.verifyPermission(ugi);
+  }
+
+  /* A class that verifies the permission checking is correct for rename */
+  private class RenamePermissionVerifier extends PermissionVerifier {
+    private Path dst;
+    private short dstAncestorPermission;
+    private short dstParentPermission;
+
+    /* initialize */
+    void set(Path src, short srcAncestorPermission, short srcParentPermission,
+        Path dst, short dstAncestorPermission, short dstParentPermission) {
+      super.set(src, srcAncestorPermission, srcParentPermission, NULL_MASK);
+      this.dst = dst;
+      this.dstAncestorPermission = dstAncestorPermission;
+      this.dstParentPermission = dstParentPermission;
+    }
+
+    @Override
+    void setOpPermission() {
+      opParentPermission = SEARCH_MASK | WRITE_MASK;
+    }
+
+    @Override
+    void call() throws IOException {
+      fs.rename(path, dst);
+    }
+
+    @Override
+    protected boolean expectPermissionDeny() {
+      return super.expectPermissionDeny()
+          || (requiredParentPermission & dstParentPermission) != 
+                requiredParentPermission
+          || (requiredAncestorPermission & dstAncestorPermission) != 
+                requiredAncestorPermission;
+    }
+
+    @Override
+    protected void logPermissions() {
+      super.logPermissions();
+      LOG.info("dst ancestor permission: "
+          + Integer.toOctalString(dstAncestorPermission));
+      LOG.info("dst parent permission: "
+          + Integer.toOctalString(dstParentPermission));
+    }
+  }
+
+  RenamePermissionVerifier renameVerifier = new RenamePermissionVerifier();
+  /* test if the permission checking of rename is correct */
+  private void testRename(UnixUserGroupInformation ugi, Path src, Path dst,
+      short srcAncestorPermission, short srcParentPermission,
+      short dstAncestorPermission, short dstParentPermission) throws Exception {
+    renameVerifier.set(src, srcAncestorPermission, srcParentPermission, dst,
+        dstAncestorPermission, dstParentPermission);
+    renameVerifier.verifyPermission(ugi);
+  }
+
+  /* A class that verifies the permission checking is correct for delete */
+  private class DeletePermissionVerifier extends PermissionVerifier {
+    void set(Path path, short ancestorPermission, short parentPermission) {
+      super.set(path, ancestorPermission, parentPermission, NULL_MASK);
+    }
+
+    @Override
+    void setOpPermission() {
+      this.opParentPermission = SEARCH_MASK | WRITE_MASK;
+    }
+
+    @Override
+    void call() throws IOException {
+      fs.delete(path, true);
+    }
+  }
+
+  /* A class that verifies the permission checking is correct for
+   * directory deletion */
+  private class DeleteDirPermissionVerifier extends DeletePermissionVerifier {
+    private short[] childPermissions;
+
+    /* initialize */
+    void set(Path path, short ancestorPermission, short parentPermission,
+        short permission, short[] childPermissions) {
+      set(path, ancestorPermission, parentPermission, permission);
+      this.childPermissions = childPermissions;
+    }
+
+    @Override
+    void setOpPermission() {
+      this.opParentPermission = SEARCH_MASK | WRITE_MASK;
+      this.opPermission = SEARCH_MASK | WRITE_MASK | READ_MASK;
+    }
+
+    @Override
+    protected boolean expectPermissionDeny() {
+      if (super.expectPermissionDeny()) {
+        return true;
+      } else {
+        if (childPermissions != null) {
+          for (short childPermission : childPermissions) {
+            if ((requiredPermission & childPermission) != requiredPermission) {
+              return true;
+            }
+          }
+        }
+        return false;
+      }
+    }
+  }
+
+  DeletePermissionVerifier fileDeletionVerifier =
+    new DeletePermissionVerifier();
+
+  /* test if the permission checking of file deletion is correct */
+  private void testDeleteFile(UnixUserGroupInformation ugi, Path file,
+      short ancestorPermission, short parentPermission) throws Exception {
+    fileDeletionVerifier.set(file, ancestorPermission, parentPermission);
+    fileDeletionVerifier.verifyPermission(ugi);
+  }
+
+  DeleteDirPermissionVerifier dirDeletionVerifier =
+    new DeleteDirPermissionVerifier();
+
+  /* test if the permission checking of directory deletion is correct */
+  private void testDeleteDir(UnixUserGroupInformation ugi, Path path,
+      short ancestorPermission, short parentPermission, short permission,
+      short[] childPermissions) throws Exception {
+    dirDeletionVerifier.set(path, ancestorPermission, parentPermission,
+        permission, childPermissions);
+    dirDeletionVerifier.verifyPermission(ugi);
+  }
+
+  /* log into dfs as the given user */
+  private void login(UnixUserGroupInformation ugi) throws IOException {
+    if (fs != null) {
+      fs.close();
+    }
+    UnixUserGroupInformation.saveToConf(conf,
+        UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
+    fs = FileSystem.get(conf); // login as ugi
+  }
+
+  /* verify that operations on a non-existent file do not result in permission-denied errors */
+  private void checkNonExistentFile() {
+    try {
+      assertFalse(fs.exists(NON_EXISTENT_FILE));
+    } catch (IOException e) {
+      checkNoPermissionDeny(e);
+    }
+    try {
+      fs.open(NON_EXISTENT_FILE);
+    } catch (IOException e) {
+      checkNoPermissionDeny(e);
+    }
+    try {
+      fs.setReplication(NON_EXISTENT_FILE, (short)4);
+    } catch (IOException e) {
+      checkNoPermissionDeny(e);
+    }
+    try {
+      fs.getFileStatus(NON_EXISTENT_FILE);
+    } catch (IOException e) {
+      checkNoPermissionDeny(e);
+    }
+    try {      
+      fs.getContentSummary(NON_EXISTENT_FILE).getLength();
+    } catch (IOException e) {
+      checkNoPermissionDeny(e);
+    }
+    try {
+      fs.listStatus(NON_EXISTENT_FILE);
+    } catch (IOException e) {
+      checkNoPermissionDeny(e);
+    }
+    try {
+      fs.delete(NON_EXISTENT_FILE, true);
+    } catch (IOException e) {
+      checkNoPermissionDeny(e);
+    }
+    try {
+      fs.rename(NON_EXISTENT_FILE, new Path(NON_EXISTENT_FILE+".txt"));
+    } catch (IOException e) {
+      checkNoPermissionDeny(e);
+    }
+  }
+  
+  private void checkNoPermissionDeny(IOException e) {
+    assertFalse(e instanceof AccessControlException);
+  }
+}

+ 98 - 0
src/test/org/apache/hadoop/hdfs/TestDFSRename.java

@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class TestDFSRename extends junit.framework.TestCase {
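+  /** Returns the number of leases currently held by the NameNode's LeaseManager. */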
+  static int countLease(MiniDFSCluster cluster) {
+    return cluster.getNamesystem().leaseManager.countLease();
+  }
+  
+  final Path dir = new Path("/test/rename/");
+
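+  /** Log the given label followed by the paths currently under {@link #dir}. */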
+  void list(FileSystem fs, String name) throws IOException {
+    FileSystem.LOG.info("\n\n" + name);
+    for(FileStatus s : fs.listStatus(dir)) {
+      FileSystem.LOG.info("" + s.getPath());
+    }
+  }
+
+  public void testRename() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      assertTrue(fs.mkdirs(dir));
+      
+      { //test lease
+        Path a = new Path(dir, "a");
+        Path aa = new Path(dir, "aa");
+        Path b = new Path(dir, "b");
+  
+        DataOutputStream a_out = fs.create(a);
+        a_out.writeBytes("something");
+        a_out.close();
+        
+        //should not have any lease
+        assertEquals(0, countLease(cluster)); 
+  
+        DataOutputStream aa_out = fs.create(aa);
+        aa_out.writeBytes("something");
+  
+        //should have 1 lease
+        assertEquals(1, countLease(cluster)); 
+        list(fs, "rename0");
+        fs.rename(a, b);
+        list(fs, "rename1");
+        aa_out.writeBytes(" more");
+        aa_out.close();
+        list(fs, "rename2");
+  
+        //should not have any lease
+        assertEquals(0, countLease(cluster));
+      }
+
+      { // test non-existent destination
+        Path dstPath = new Path("/c/d");
+        assertFalse(fs.exists(dstPath));
+        assertFalse(fs.rename(dir, dstPath));
+      }
+
+      { // test rename /a/b to /a/b/c
+        Path src = new Path("/a/b");
+        Path dst = new Path("/a/b/c");
+
+        DataOutputStream a_out = fs.create(new Path(src, "foo"));
+        a_out.writeBytes("something");
+        a_out.close();
+        
+        assertFalse(fs.rename(src, dst));
+      }
+      
+      fs.delete(dir, true);
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+}

+ 244 - 0
src/test/org/apache/hadoop/hdfs/TestDFSRollback.java

@@ -0,0 +1,244 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hdfs;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+
+/**
+* This test ensures the appropriate response (successful or failure) from
+* the system when the system is rolled back under various storage state and
+* version conditions.
+*/
+public class TestDFSRollback extends TestCase {
+ 
+  private static final Log LOG = LogFactory.getLog(
+      "org.apache.hadoop.hdfs.TestDFSRollback");
+  private Configuration conf;
+  private int testCounter = 0;
+  private MiniDFSCluster cluster = null;
+  
+  /**
+   * Writes an INFO log message containing the parameters.
+   */
+  void log(String label, int numDirs) {
+    LOG.info("============================================================");
+    LOG.info("***TEST " + (testCounter++) + "*** " 
+             + label + ":"
+             + " numDirs="+numDirs);
+  }
+  
+  /**
+   * Verify that the new current directory is the old previous.  
+   * It is assumed that the server has recovered and rolled back.
+   */
+  void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
+    switch (nodeType) {
+    case NAME_NODE:
+      for (int i = 0; i < baseDirs.length; i++) {
+        assertTrue(new File(baseDirs[i],"current").isDirectory());
+        assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
+        assertTrue(new File(baseDirs[i],"current/edits").isFile());
+        assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
+        assertTrue(new File(baseDirs[i],"current/fstime").isFile());
+      }
+      break;
+    case DATA_NODE:
+      for (int i = 0; i < baseDirs.length; i++) {
+        assertEquals(
+            UpgradeUtilities.checksumContents(nodeType,
+                new File(baseDirs[i], "current")),
+            UpgradeUtilities.checksumMasterContents(nodeType));
+      }
+      break;
+    }
+    for (int i = 0; i < baseDirs.length; i++) {
+      assertFalse(new File(baseDirs[i],"previous").isDirectory());
+    }
+  }
+ 
+  /**
+   * Attempts to start a NameNode with the given operation.  Starting
+   * the NameNode should throw an exception.
+   */
+  void startNameNodeShouldFail(StartupOption operation) {
+    try {
+      cluster = new MiniDFSCluster(conf, 0, operation); // should fail
+      throw new AssertionError("NameNode should have failed to start");
+    } catch (Exception expected) {
+      // expected
+    }
+  }
+  
+  /**
+   * Attempts to start a DataNode with the given operation.  Starting
+   * the DataNode should throw an exception.
+   */
+  void startDataNodeShouldFail(StartupOption operation) {
+    try {
+      cluster.startDataNodes(conf, 1, false, operation, null); // should fail
+      throw new AssertionError("DataNode should have failed to start");
+    } catch (Exception expected) {
+      // expected
+      assertFalse(cluster.isDataNodeUp());
+    }
+  }
+ 
+  /**
+   * This test attempts to rollback the NameNode and DataNode under
+   * a number of valid and invalid conditions.
+   */
+  public void testRollback() throws Exception {
+    File[] baseDirs;
+    UpgradeUtilities.initialize();
+    
+    for (int numDirs = 1; numDirs <= 2; numDirs++) {
+      conf = new Configuration();
+      conf.setInt("dfs.datanode.scan.period.hours", -1);      
+      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
+      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
+      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      
+      log("Normal NameNode rollback", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
+      checkResult(NAME_NODE, nameNodeDirs);
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("Normal DataNode rollback", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
+      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
+      checkResult(DATA_NODE, dataNodeDirs);
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+      log("NameNode rollback without existing previous dir", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      startNameNodeShouldFail(StartupOption.ROLLBACK);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("DataNode rollback without existing previous dir", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+      log("DataNode rollback with future stored layout version in previous", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
+      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
+                                         new StorageInfo(Integer.MIN_VALUE,
+                                                         UpgradeUtilities.getCurrentNamespaceID(cluster),
+                                                         UpgradeUtilities.getCurrentFsscTime(cluster)));
+      startDataNodeShouldFail(StartupOption.ROLLBACK);
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+      
+      log("DataNode rollback with newer fsscTime in previous", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
+      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
+                                         new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
+                                                         UpgradeUtilities.getCurrentNamespaceID(cluster),
+                                                         Long.MAX_VALUE));
+      startDataNodeShouldFail(StartupOption.ROLLBACK);
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+      log("NameNode rollback with no edits file", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      for (File f : baseDirs) { 
+        FileUtil.fullyDelete(new File(f,"edits"));
+      }
+      startNameNodeShouldFail(StartupOption.ROLLBACK);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode rollback with no image file", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      for (File f : baseDirs) { 
+        FileUtil.fullyDelete(new File(f,"fsimage")); 
+      }
+      startNameNodeShouldFail(StartupOption.ROLLBACK);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode rollback with corrupt version file", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      for (File f : baseDirs) { 
+        UpgradeUtilities.corruptFile(new File(f,"VERSION")); 
+      }
+      startNameNodeShouldFail(StartupOption.ROLLBACK);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode rollback with old layout version in previous", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
+                                         new StorageInfo(1,
+                                                         UpgradeUtilities.getCurrentNamespaceID(null),
+                                                         UpgradeUtilities.getCurrentFsscTime(null)));
+      startNameNodeShouldFail(StartupOption.UPGRADE);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+    } // end numDir loop
+  }
+ 
+  protected void tearDown() throws Exception {
+    LOG.info("Shutting down MiniDFSCluster");
+    if (cluster != null) cluster.shutdown();
+  }
+  
+  public static void main(String[] args) throws Exception {
+    new TestDFSRollback().testRollback();
+  }
+  
+}
+
+

+ 1267 - 0
src/test/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -0,0 +1,1267 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.io.PrintWriter;
+import java.security.Permission;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+import java.util.Scanner;
+import java.util.zip.GZIPOutputStream;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSInputChecker;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.shell.Count;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * This class tests commands from DFSShell.
+ */
+public class TestDFSShell extends TestCase {
+  private static final Log LOG = LogFactory.getLog(TestDFSShell.class);
+  
+  static final String TEST_ROOT_DIR =
+    new Path(System.getProperty("test.build.data","/tmp"))
+    .toString().replace(' ', '+');
+
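+  /** Create the file f on fs, write a short marker string into it, and verify it exists. */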
+  static Path writeFile(FileSystem fs, Path f) throws IOException {
+    DataOutputStream out = fs.create(f);
+    out.writeBytes("dhruba: " + f);
+    out.close();
+    assertTrue(fs.exists(f));
+    return f;
+  }
+
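+  /** Create directory p on fs and verify that it exists and is a directory. */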
+  static Path mkdir(FileSystem fs, Path p) throws IOException {
+    assertTrue(fs.mkdirs(p));
+    assertTrue(fs.exists(p));
+    assertTrue(fs.getFileStatus(p).isDir());
+    return p;
+  }
+
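+  /** Create a local file containing its own absolute path and verify it exists. */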
+  static File createLocalFile(File f) throws IOException {
+    assertTrue(!f.exists());
+    PrintWriter out = new PrintWriter(f);
+    out.print("createLocalFile: " + f.getAbsolutePath());
+    out.flush();
+    out.close();
+    assertTrue(f.exists());
+    assertTrue(f.isFile());
+    return f;
+  }
+
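+  /** Print the calling stack frame followed by the message, for tracing test progress. */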
+  static void show(String s) {
+    System.out.println(Thread.currentThread().getStackTrace()[2] + " " + s);
+  }
+
+  public void testZeroSizeFile() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+               fs instanceof DistributedFileSystem);
+    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
+
+    try {
+      //create a zero size file
+      final File f1 = new File(TEST_ROOT_DIR, "f1");
+      assertTrue(!f1.exists());
+      assertTrue(f1.createNewFile());
+      assertTrue(f1.exists());
+      assertTrue(f1.isFile());
+      assertEquals(0L, f1.length());
+      
+      //copy to remote
+      final Path root = mkdir(dfs, new Path("/test/zeroSizeFile"));
+      final Path remotef = new Path(root, "dst");
+      show("copy local " + f1 + " to remote " + remotef);
+      dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), remotef);
+      
+      //getBlockSize() should not throw exception
+      show("Block size = " + dfs.getFileStatus(remotef).getBlockSize());
+
+      //copy back
+      final File f2 = new File(TEST_ROOT_DIR, "f2");
+      assertTrue(!f2.exists());
+      dfs.copyToLocalFile(remotef, new Path(f2.getPath()));
+      assertTrue(f2.exists());
+      assertTrue(f2.isFile());
+      assertEquals(0L, f2.length());
+  
+      f1.delete();
+      f2.delete();
+    } finally {
+      try {dfs.close();} catch (Exception e) {}
+      cluster.shutdown();
+    }
+  }
+  
+  public void testRecursiveRm() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: " + fs.getUri(),
+        fs instanceof DistributedFileSystem);
+    try {
+      fs.mkdirs(new Path(new Path("parent"), "child"));
+      try {
+        fs.delete(new Path("parent"), false);
+        fail("Non-recursive delete of a non-empty directory should have failed");
+      } catch (IOException e) {
+        // expected: a non-empty directory cannot be deleted non-recursively
+      }
+      try {
+        fs.delete(new Path("parent"), true);
+      } catch (IOException e) {
+        fail("Recursive delete of the directory should have succeeded");
+      }
+    } finally {
+      try { fs.close(); } catch (IOException e) {}
+      cluster.shutdown();
+    }
+  }
+    
+  public void testDu() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+                fs instanceof DistributedFileSystem);
+    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
+    PrintStream psBackup = System.out;
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    PrintStream psOut = new PrintStream(out);
+    System.setOut(psOut);
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    
+    try {
+      Path myPath = new Path("/test/dir");
+      assertTrue(fs.mkdirs(myPath));
+      assertTrue(fs.exists(myPath));
+      Path myFile = new Path("/test/dir/file");
+      writeFile(fs, myFile);
+      assertTrue(fs.exists(myFile));
+      Path myFile2 = new Path("/test/dir/file2");
+      writeFile(fs, myFile2);
+      assertTrue(fs.exists(myFile2));
+      
+      String[] args = new String[2];
+      args[0] = "-du";
+      args[1] = "/test/dir";
+      int val = -1;
+      try {
+        val = shell.run(args);
+      } catch (Exception e) {
+        System.err.println("Exception raised from DFSShell.run " +
+                            e.getLocalizedMessage());
+      }
+      assertTrue(val == 0);
+      String returnString = out.toString();
+      out.reset();
+      // Check that the sizes match as expected
+      assertTrue(returnString.contains("22"));
+      assertTrue(returnString.contains("23"));
+      
+    } finally {
+      try {dfs.close();} catch (Exception e) {}
+      System.setOut(psBackup);
+      cluster.shutdown();
+    }
+                                  
+  }
+  public void testPut() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+               fs instanceof DistributedFileSystem);
+    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
+
+    try {
+      // remove left over crc files:
+      new File(TEST_ROOT_DIR, ".f1.crc").delete();
+      new File(TEST_ROOT_DIR, ".f2.crc").delete();    
+      final File f1 = createLocalFile(new File(TEST_ROOT_DIR, "f1"));
+      final File f2 = createLocalFile(new File(TEST_ROOT_DIR, "f2"));
+  
+      final Path root = mkdir(dfs, new Path("/test/put"));
+      final Path dst = new Path(root, "dst");
+  
+      show("begin");
+      
+      final Thread copy2ndFileThread = new Thread() {
+        public void run() {
+          try {
+            show("copy local " + f2 + " to remote " + dst);
+            dfs.copyFromLocalFile(false, false, new Path(f2.getPath()), dst);
+          } catch (IOException ioe) {
+            show("good " + StringUtils.stringifyException(ioe));
+            return;
+          }
+          // should not reach here; the copy must throw an IOException
+          assertTrue(false);
+        }
+      };
+      
+      //use SecurityManager to pause the copying of f1 and begin copying f2
+      SecurityManager sm = System.getSecurityManager();
+      System.out.println("SecurityManager = " + sm);
+      System.setSecurityManager(new SecurityManager() {
+        private boolean firstTime = true;
+  
+        public void checkPermission(Permission perm) {
+          if (firstTime) {
+            Thread t = Thread.currentThread();
+            if (!t.toString().contains("DataNode")) {
+              String s = "" + Arrays.asList(t.getStackTrace());
+              if (s.contains("FileUtil.copyContent")) {
+                //pause at FileUtil.copyContent
+  
+                firstTime = false;
+                copy2ndFileThread.start();
+                try {Thread.sleep(5000);} catch (InterruptedException e) {}
+              }
+            }
+          }
+        }
+      });
+      show("copy local " + f1 + " to remote " + dst);
+      dfs.copyFromLocalFile(false, false, new Path(f1.getPath()), dst);
+      show("done");
+  
+      try {copy2ndFileThread.join();} catch (InterruptedException e) { }
+      System.setSecurityManager(sm);
+
+      // copy multiple files to destination directory
+      final Path destmultiple = mkdir(dfs, new Path("/test/putmultiple"));
+      Path[] srcs = new Path[2];
+      srcs[0] = new Path(f1.getPath());
+      srcs[1] = new Path(f2.getPath());
+      dfs.copyFromLocalFile(false, false, srcs, destmultiple);
+      srcs[0] = new Path(destmultiple,"f1"); 
+      srcs[1] = new Path(destmultiple,"f2"); 
+      assertTrue(dfs.exists(srcs[0]));
+      assertTrue(dfs.exists(srcs[1]));
+
+      // move multiple files to destination directory
+      final Path destmultiple2 = mkdir(dfs, new Path("/test/movemultiple"));
+      srcs[0] = new Path(f1.getPath());
+      srcs[1] = new Path(f2.getPath());
+      dfs.moveFromLocalFile(srcs, destmultiple2);
+      assertFalse(f1.exists());
+      assertFalse(f2.exists());
+      srcs[0] = new Path(destmultiple2, "f1");
+      srcs[1] = new Path(destmultiple2, "f2");
+      assertTrue(dfs.exists(srcs[0]));
+      assertTrue(dfs.exists(srcs[1]));
+
+      f1.delete();
+      f2.delete();
+    } finally {
+      try {dfs.close();} catch (Exception e) {}
+      cluster.shutdown();
+    }
+  }
+
+
+  /** check command error outputs and exit statuses. */
+  public void testErrOutPut() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    PrintStream bak = null;
+    try {
+      cluster = new MiniDFSCluster(conf, 2, true, null);
+      FileSystem srcFs = cluster.getFileSystem();
+      Path root = new Path("/nonexistentfile");
+      bak = System.err;
+      ByteArrayOutputStream out = new ByteArrayOutputStream();
+      PrintStream tmp = new PrintStream(out);
+      System.setErr(tmp);
+      String[] argv = new String[2];
+      argv[0] = "-cat";
+      argv[1] = root.toUri().getPath();
+      int ret = ToolRunner.run(new FsShell(), argv);
+      assertTrue(" -cat returned -1 ", 0>=ret);
+      String returned = out.toString();
+      assertTrue("cat does not print exceptions ",
+          (returned.lastIndexOf("Exception") == -1));
+      out.reset();
+      argv[0] = "-rm";
+      argv[1] = root.toString();
+      FsShell shell = new FsShell();
+      shell.setConf(conf);
+      ret = ToolRunner.run(shell, argv);
+      assertTrue(" -rm returned -1 ", 0>=ret);
+      returned = out.toString();
+      out.reset();
+      assertTrue("rm prints reasonable error ",
+          (returned.lastIndexOf("No such file or directory") != -1));
+      argv[0] = "-rmr";
+      argv[1] = root.toString();
+      ret = ToolRunner.run(shell, argv);
+      assertTrue(" -rmr returned -1", 0>=ret);
+      returned = out.toString();
+      assertTrue("rmr prints reasonable error ",
+          (returned.lastIndexOf("No such file or directory") != -1));
+      out.reset();
+      argv[0] = "-du";
+      argv[1] = "/nonexistentfile";
+      ret = ToolRunner.run(shell, argv);
+      returned = out.toString();
+      assertTrue(" -du prints reasonable error ",
+          (returned.lastIndexOf("No such file or directory") != -1));
+      out.reset();
+      argv[0] = "-dus";
+      argv[1] = "/nonexistentfile";
+      ret = ToolRunner.run(shell, argv);
+      returned = out.toString();
+      assertTrue(" -dus prints reasonable error",
+          (returned.lastIndexOf("No such file or directory") != -1));
+      out.reset();
+      argv[0] = "-ls";
+      argv[1] = "/nonexistenfile";
+      ret = ToolRunner.run(shell, argv);
+      returned = out.toString();
+      assertTrue(" -ls does not return Found 0 items",
+          (returned.lastIndexOf("Found 0") == -1));
+      out.reset();
+      argv[0] = "-ls";
+      argv[1] = "/nonexistentfile";
+      ret = ToolRunner.run(shell, argv);
+      assertTrue(" -lsr should fail ",
+          (ret < 0));
+      out.reset();
+      srcFs.mkdirs(new Path("/testdir"));
+      argv[0] = "-ls";
+      argv[1] = "/testdir";
+      ret = ToolRunner.run(shell, argv);
+      returned = out.toString();
+      assertTrue(" -ls does not print out anything ",
+          (returned.lastIndexOf("Found 0") == -1));
+      out.reset();
+      argv[0] = "-ls";
+      argv[1] = "/user/nonxistant/*";
+      ret = ToolRunner.run(shell, argv);
+      assertTrue(" -ls on nonexistent glob returns -1",
+          (ret < 0));
+      out.reset();
+      argv[0] = "-mkdir";
+      argv[1] = "/testdir";
+      ret = ToolRunner.run(shell, argv);
+      returned = out.toString();
+      assertTrue(" -mkdir returned -1 ", (ret < 0));
+      assertTrue(" -mkdir returned File exists", 
+          (returned.lastIndexOf("File exists") != -1));
+      Path testFile = new Path("/testfile");
+      OutputStream outtmp = srcFs.create(testFile);
+      outtmp.write(testFile.toString().getBytes());
+      outtmp.close();
+      out.reset();
+      argv[0] = "-mkdir";
+      argv[1] = "/testfile";
+      ret = ToolRunner.run(shell, argv);
+      returned = out.toString();
+      assertTrue(" -mkdir returned -1", (ret < 0));
+      assertTrue(" -mkdir returned this is a file ",
+          (returned.lastIndexOf("not a directory") != -1));
+      out.reset();
+      argv = new String[3];
+      argv[0] = "-mv";
+      argv[1] = "/testfile";
+      argv[2] = "file";
+      ret = ToolRunner.run(shell, argv);
+      assertTrue("mv failed to rename", ret == -1);
+      out.reset();
+      argv = new String[3];
+      argv[0] = "-mv";
+      argv[1] = "/testfile";
+      argv[2] = "/testfiletest";
+      ret = ToolRunner.run(shell, argv);
+      returned = out.toString();
+      assertTrue("no output from rename", 
+          (returned.lastIndexOf("Renamed") == -1));
+      out.reset();
+      argv[0] = "-mv";
+      argv[1] = "/testfile";
+      argv[2] = "/testfiletmp";
+      ret = ToolRunner.run(shell, argv);
+      returned = out.toString();
+      assertTrue(" unix like output",
+          (returned.lastIndexOf("No such file or") != -1));
+      out.reset();
+      argv = new String[1];
+      argv[0] = "-du";
+      srcFs.mkdirs(srcFs.getHomeDirectory());
+      ret = ToolRunner.run(shell, argv);
+      returned = out.toString();
+      assertTrue(" no error ", (ret == 0));
+      assertTrue("empty path specified",
+          (returned.lastIndexOf("empty string") == -1));
+    } finally {
+      if (bak != null) {
+        System.setErr(bak);
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  public void testURIPaths() throws Exception {
+    Configuration srcConf = new Configuration();
+    Configuration dstConf = new Configuration();
+    MiniDFSCluster srcCluster =  null;
+    MiniDFSCluster dstCluster = null;
+    String bak = System.getProperty("test.build.data");
+    try{
+      srcCluster = new MiniDFSCluster(srcConf, 2, true, null);
+      File nameDir = new File(new File(bak), "dfs_tmp_uri/");
+      nameDir.mkdirs();
+      System.setProperty("test.build.data", nameDir.toString());
+      dstCluster = new MiniDFSCluster(dstConf, 2, true, null);
+      FileSystem srcFs = srcCluster.getFileSystem();
+      FileSystem dstFs = dstCluster.getFileSystem();
+      FsShell shell = new FsShell();
+      shell.setConf(srcConf);
+      //check for ls
+      String[] argv = new String[2];
+      argv[0] = "-ls";
+      argv[1] = dstFs.getUri().toString() + "/";
+      int ret = ToolRunner.run(shell, argv);
+      assertTrue("ls works on remote uri ", (ret==0));
+      //check for rm -r 
+      dstFs.mkdirs(new Path("/hadoopdir"));
+      argv = new String[2];
+      argv[0] = "-rmr";
+      argv[1] = dstFs.getUri().toString() + "/hadoopdir";
+      ret = ToolRunner.run(shell, argv);
+      assertTrue("-rmr works on remote uri " + argv[1], (ret==0));
+      //check du 
+      argv[0] = "-du";
+      argv[1] = dstFs.getUri().toString() + "/";
+      ret = ToolRunner.run(shell, argv);
+      assertTrue("du works on remote uri ", (ret ==0));
+      //check put
+      File furi = new File(TEST_ROOT_DIR, "furi");
+      createLocalFile(furi);
+      argv = new String[3];
+      argv[0] = "-put";
+      argv[1] = furi.toString();
+      argv[2] = dstFs.getUri().toString() + "/furi";
+      ret = ToolRunner.run(shell, argv);
+      assertTrue(" put is working ", (ret==0));
+      //check cp 
+      argv[0] = "-cp";
+      argv[1] = dstFs.getUri().toString() + "/furi";
+      argv[2] = srcFs.getUri().toString() + "/furi";
+      ret = ToolRunner.run(shell, argv);
+      assertTrue(" cp is working ", (ret==0));
+      assertTrue(srcFs.exists(new Path("/furi")));
+      //check cat 
+      argv = new String[2];
+      argv[0] = "-cat";
+      argv[1] = dstFs.getUri().toString() + "/furi";
+      ret = ToolRunner.run(shell, argv);
+      assertTrue(" cat is working ", (ret == 0));
+      //check chown
+      dstFs.delete(new Path("/furi"), true);
+      dstFs.delete(new Path("/hadoopdir"), true);
+      String file = "/tmp/chownTest";
+      Path path = new Path(file);
+      Path parent = new Path("/tmp");
+      Path root = new Path("/");
+      TestDFSShell.writeFile(dstFs, path);
+      runCmd(shell, "-chgrp", "-R", "herbivores", dstFs.getUri().toString() +"/*");
+      confirmOwner(null, "herbivores", dstFs, parent, path);
+      runCmd(shell, "-chown", "-R", ":reptiles", dstFs.getUri().toString() + "/");
+      confirmOwner(null, "reptiles", dstFs, root, parent, path);
+      //check if default hdfs:/// works 
+      argv[0] = "-cat";
+      argv[1] = "hdfs:///furi";
+      ret = ToolRunner.run(shell, argv);
+      assertTrue(" default works for cat", (ret == 0));
+      argv[0] = "-ls";
+      argv[1] = "hdfs:///";
+      ret = ToolRunner.run(shell, argv);
+      assertTrue("default works for ls ", (ret == 0));
+      argv[0] = "-rmr";
+      argv[1] = "hdfs:///furi";
+      ret = ToolRunner.run(shell, argv);
+      assertTrue("default works for rm/rmr", (ret ==0));
+    } finally {
+      System.setProperty("test.build.data", bak);
+      if (null != srcCluster) {
+        srcCluster.shutdown();
+      }
+      if (null != dstCluster) {
+        dstCluster.shutdown();
+      }
+    }
+  }
+
+  public void testText() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    PrintStream bak = null;
+    try {
+      cluster = new MiniDFSCluster(conf, 2, true, null);
+      FileSystem fs = cluster.getFileSystem();
+      Path root = new Path("/texttest");
+      fs.mkdirs(root);
+      OutputStream zout = new GZIPOutputStream(
+          fs.create(new Path(root, "file.gz")));
+      Random r = new Random();
+      ByteArrayOutputStream file = new ByteArrayOutputStream();
+      for (int i = 0; i < 1024; ++i) {
+        char c = Character.forDigit(r.nextInt(26) + 10, 36);
+        file.write(c);
+        zout.write(c);
+      }
+      zout.close();
+
+      bak = System.out;
+      ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setOut(new PrintStream(out));
+
+      String[] argv = new String[2];
+      argv[0] = "-text";
+      argv[1] = new Path(root, "file.gz").toUri().getPath();
+      int ret = ToolRunner.run(new FsShell(), argv);
+      assertTrue("-text returned -1", 0 >= ret);
+      file.reset();
+      out.reset();
+      assertTrue("Output doesn't match input",
+          Arrays.equals(file.toByteArray(), out.toByteArray()));
+
+    } finally {
+      if (null != bak) {
+        System.setOut(bak);
+      }
+      if (null != cluster) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  public void testCopyToLocal() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+               fs instanceof DistributedFileSystem);
+    DistributedFileSystem dfs = (DistributedFileSystem)fs;
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+
+    try {
+      String root = createTree(dfs, "copyToLocal");
+
+      // Verify copying the tree
+      {
+        try {
+          assertEquals(0,
+              runCmd(shell, "-copyToLocal", root + "*", TEST_ROOT_DIR));
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+
+        File localroot = new File(TEST_ROOT_DIR, "copyToLocal");
+        File localroot2 = new File(TEST_ROOT_DIR, "copyToLocal2");        
+        
+        File f1 = new File(localroot, "f1");
+        assertTrue("Copying failed.", f1.isFile());
+
+        File f2 = new File(localroot, "f2");
+        assertTrue("Copying failed.", f2.isFile());
+
+        File sub = new File(localroot, "sub");
+        assertTrue("Copying failed.", sub.isDirectory());
+
+        File f3 = new File(sub, "f3");
+        assertTrue("Copying failed.", f3.isFile());
+
+        File f4 = new File(sub, "f4");
+        assertTrue("Copying failed.", f4.isFile());
+        
+        File f5 = new File(localroot2, "f1");
+        assertTrue("Copying failed.", f5.isFile());        
+
+        f1.delete();
+        f2.delete();
+        f3.delete();
+        f4.delete();
+        f5.delete();
+        sub.delete();
+      }
+      // Verify that copying a non-existent source does not create a
+      // zero-byte destination file
+      {
+        String[] args = {"-copyToLocal", "nosuchfile", TEST_ROOT_DIR};
+        try {   
+          assertEquals(-1, shell.run(args));
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                            e.getLocalizedMessage());
+        }                            
+        File f6 = new File(TEST_ROOT_DIR, "nosuchfile");
+        assertTrue(!f6.exists());
+      }
+    } finally {
+      try {
+        dfs.close();
+      } catch (Exception e) {
+      }
+      cluster.shutdown();
+    }
+  }
+
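+  /** Build a small directory tree under "/test/" + name (plus a sibling tree with suffix "2") and return the root path as a string. */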
+  static String createTree(FileSystem fs, String name) throws IOException {
+    // create a tree
+    //   ROOT
+    //   |- f1
+    //   |- f2
+    //   + sub
+    //      |- f3
+    //      |- f4
+    //   ROOT2
+    //   |- f1
+    String path = "/test/" + name;
+    Path root = mkdir(fs, new Path(path));
+    Path sub = mkdir(fs, new Path(root, "sub"));
+    Path root2 = mkdir(fs, new Path(path + "2"));        
+
+    writeFile(fs, new Path(root, "f1"));
+    writeFile(fs, new Path(root, "f2"));
+    writeFile(fs, new Path(sub, "f3"));
+    writeFile(fs, new Path(sub, "f4"));
+    writeFile(fs, new Path(root2, "f1"));
+    mkdir(fs, new Path(root2, "sub"));
+    return path;
+  }
+
+  public void testCount() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+
+    try {
+      String root = createTree(dfs, "count");
+
+      // Verify the counts
+      runCount(root, 2, 4, conf);
+      runCount(root + "2", 2, 1, conf);
+      runCount(root + "2/f1", 0, 1, conf);
+      runCount(root + "2/sub", 1, 0, conf);
+
+      final FileSystem localfs = FileSystem.getLocal(conf);
+      Path localpath = new Path(TEST_ROOT_DIR, "testcount");
+      localpath = localpath.makeQualified(localfs);
+      localfs.mkdirs(localpath);
+      
+      final String localstr = localpath.toString();
+      System.out.println("localstr=" + localstr);
+      runCount(localstr, 1, 0, conf);
+      assertEquals(0, new Count(new String[]{root, localstr}, 0, conf).runAll());
+    } finally {
+      try {
+        dfs.close();
+      } catch (Exception e) {
+      }
+      cluster.shutdown();
+    }
+  }
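+
+  /** Run the Count shell command on path and check the reported directory and file counts. */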
+  private void runCount(String path, long dirs, long files, Configuration conf
+    ) throws IOException {
+    ByteArrayOutputStream bytes = new ByteArrayOutputStream(); 
+    PrintStream out = new PrintStream(bytes);
+    PrintStream oldOut = System.out;
+    System.setOut(out);
+    Scanner in = null;
+    String results = null;
+    try {
+      new Count(new String[]{path}, 0, conf).runAll();
+      results = bytes.toString();
+      in = new Scanner(results);
+      assertEquals(dirs, in.nextLong());
+      assertEquals(files, in.nextLong());
+    } finally {
+      if (in!=null) in.close();
+      IOUtils.closeStream(out);
+      System.setOut(oldOut);
+      System.out.println("results:\n" + results);
+    }
+  }
+
+  // Runs an FsShell command, throwing IOException instead of the Exception declared by shell.run().
+  private int runCmd(FsShell shell, String... args) throws IOException {
+    try {
+      return shell.run(args);
+    } catch (IOException e) {
+      throw e;
+    } catch (RuntimeException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new IOException(StringUtils.stringifyException(e));
+    }
+  }
+  
+  /**
+   * Test chmod.
+   */
+  void testChmod(Configuration conf, FileSystem fs, String chmodDir) 
+                                                    throws IOException {
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    
+    try {
+     //first make dir
+     Path dir = new Path(chmodDir);
+     fs.delete(dir, true);
+     fs.mkdirs(dir);
+
+     confirmPermissionChange(/* Setting */ "u+rwx,g=rw,o-rwx",
+                             /* Should give */ "rwxrw----", fs, shell, dir);
+     
+     //create an empty file
+     Path file = new Path(chmodDir, "file");
+     TestDFSShell.writeFile(fs, file);
+
+     //test octal mode
+     confirmPermissionChange( "644", "rw-r--r--", fs, shell, file);
+
+     //test recursive
+     runCmd(shell, "-chmod", "-R", "a+rwX", chmodDir);
+     assertEquals("rwxrwxrwx",
+                  fs.getFileStatus(dir).getPermission().toString()); 
+     assertEquals("rw-rw-rw-",
+                  fs.getFileStatus(file).getPermission().toString());
+
+     // test sticky bit on directories
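+     // Note: the trailing "t"/"T" in the expected strings below is the sticky
+     // bit; it is lowercase when "others" also have execute permission and
+     // uppercase when they do not.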
+     Path dir2 = new Path(dir, "stickybit" );
+     fs.mkdirs(dir2 );
+     LOG.info("Testing sticky bit on: " + dir2);
+     LOG.info("Sticky bit directory initial mode: " + 
+                   fs.getFileStatus(dir2).getPermission());
+     
+     confirmPermissionChange("u=rwx,g=rx,o=rx", "rwxr-xr-x", fs, shell, dir2);
+     
+     confirmPermissionChange("+t", "rwxr-xr-t", fs, shell, dir2);
+
+     confirmPermissionChange("-t", "rwxr-xr-x", fs, shell, dir2);
+
+     confirmPermissionChange("=t", "--------T", fs, shell, dir2);
+
+     confirmPermissionChange("0000", "---------", fs, shell, dir2);
+
+     confirmPermissionChange("1666", "rw-rw-rwT", fs, shell, dir2);
+
+     confirmPermissionChange("777", "rwxrwxrwt", fs, shell, dir2);
+     
+     fs.delete(dir2, true);
+     fs.delete(dir, true);
+     
+    } finally {
+      try {
+        fs.close();
+        shell.close();
+      } catch (IOException ignored) {}
+    }
+  }
+
+  // Applies a permission change to a path and confirms that the resulting
+  // permission string matches the expected value.
+  private void confirmPermissionChange(String toApply, String expected,
+      FileSystem fs, FsShell shell, Path dir2) throws IOException {
+    LOG.info("Confirming permission change of " + toApply + " to " + expected);
+    runCmd(shell, "-chmod", toApply, dir2.toString());
+
+    String result = fs.getFileStatus(dir2).getPermission().toString();
+
+    LOG.info("Permission change result: " + result);
+    assertEquals(expected, result);
+  }
+   
+  private void confirmOwner(String owner, String group, 
+                            FileSystem fs, Path... paths) throws IOException {
+    for(Path path : paths) {
+      if (owner != null) {
+        assertEquals(owner, fs.getFileStatus(path).getOwner());
+      }
+      if (group != null) {
+        assertEquals(group, fs.getFileStatus(path).getGroup());
+      }
+    }
+  }
+  
+  public void testFilePermissions() throws IOException {
+    Configuration conf = new Configuration();
+    
+    //test chmod on local fs
+    FileSystem fs = FileSystem.getLocal(conf);
+    testChmod(conf, fs, 
+              (new File(TEST_ROOT_DIR, "chmodTest")).getAbsolutePath());
+    
+    conf.set("dfs.permissions", "true");
+    
+    //test chmod on DFS
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    fs = cluster.getFileSystem();
+    testChmod(conf, fs, "/tmp/chmodTest");
+    
+    // test chown and chgrp on DFS:
+    
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+    fs = cluster.getFileSystem();
+    
+    /* On DFS this test runs as the super user, so it can change the owner of
+     * any file to anything. The "-R" option is already covered by the chmod
+     * test above.
+     */
+    
+    String file = "/tmp/chownTest";
+    Path path = new Path(file);
+    Path parent = new Path("/tmp");
+    Path root = new Path("/");
+    TestDFSShell.writeFile(fs, path);
+    
+    runCmd(shell, "-chgrp", "-R", "herbivores", "/*", "unknownFile*");
+    confirmOwner(null, "herbivores", fs, parent, path);
+    
+    runCmd(shell, "-chgrp", "mammals", file);
+    confirmOwner(null, "mammals", fs, path);
+    
+    runCmd(shell, "-chown", "-R", ":reptiles", "/");
+    confirmOwner(null, "reptiles", fs, root, parent, path);
+    
+    runCmd(shell, "-chown", "python:", "/nonExistentFile", file);
+    confirmOwner("python", "reptiles", fs, path);
+
+    runCmd(shell, "-chown", "-R", "hadoop:toys", "unknownFile", "/");
+    confirmOwner("hadoop", "toys", fs, root, parent, path);
+    
+    // Test different characters in names
+
+    runCmd(shell, "-chown", "hdfs.user", file);
+    confirmOwner("hdfs.user", null, fs, path);
+    
+    runCmd(shell, "-chown", "_Hdfs.User-10:_hadoop.users--", file);
+    confirmOwner("_Hdfs.User-10", "_hadoop.users--", fs, path);
+    
+    runCmd(shell, "-chown", "hdfs/hadoop-core@apache.org:asf-projects", file);
+    confirmOwner("hdfs/hadoop-core@apache.org", "asf-projects", fs, path);
+    
+    runCmd(shell, "-chgrp", "hadoop-core@apache.org/100", file);
+    confirmOwner(null, "hadoop-core@apache.org/100", fs, path);
+    
+    cluster.shutdown();
+  }
+  /**
+   * Tests various options of DFSShell.
+   */
+  public void testDFSShell() throws IOException {
+    Configuration conf = new Configuration();
+    /* This tests some properties of ChecksumFileSystem as well.
+     * Make sure that we create ChecksumDFS */
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+            fs instanceof DistributedFileSystem);
+    DistributedFileSystem fileSys = (DistributedFileSystem)fs;
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+
+    try {
+      // First create a new directory with mkdirs
+      Path myPath = new Path("/test/mkdirs");
+      assertTrue(fileSys.mkdirs(myPath));
+      assertTrue(fileSys.exists(myPath));
+      assertTrue(fileSys.mkdirs(myPath));
+
+      // Second, create a file in that directory.
+      Path myFile = new Path("/test/mkdirs/myFile");
+      writeFile(fileSys, myFile);
+      assertTrue(fileSys.exists(myFile));
+      Path myFile2 = new Path("/test/mkdirs/myFile2");      
+      writeFile(fileSys, myFile2);
+      assertTrue(fileSys.exists(myFile2));
+
+      // Verify that rm works with a glob pattern
+      {
+        String[] args = new String[2];
+        args[0] = "-rm";
+        args[1] = "/test/mkdirs/myFile*";
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage()); 
+        }
+        assertTrue(val == 0);
+        assertFalse(fileSys.exists(myFile));
+        assertFalse(fileSys.exists(myFile2));
+
+        //re-create the files for other tests
+        writeFile(fileSys, myFile);
+        assertTrue(fileSys.exists(myFile));
+        writeFile(fileSys, myFile2);
+        assertTrue(fileSys.exists(myFile2));
+      }
+
+      // Verify that we can read the file
+      {
+        String[] args = new String[3];
+        args[0] = "-cat";
+        args[1] = "/test/mkdirs/myFile";
+        args[2] = "/test/mkdirs/myFile2";        
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run: " +
+                             StringUtils.stringifyException(e)); 
+        }
+        assertTrue(val == 0);
+      }
+      fileSys.delete(myFile2, true);
+
+      // Verify that we get an error while trying to read a nonexistent file
+      {
+        String[] args = new String[2];
+        args[0] = "-cat";
+        args[1] = "/test/mkdirs/myFile1";
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage()); 
+        }
+        assertTrue(val != 0);
+      }
+
+      // Verify that we get an error while trying to delete a nonexistent file
+      {
+        String[] args = new String[2];
+        args[0] = "-rm";
+        args[1] = "/test/mkdirs/myFile1";
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage()); 
+        }
+        assertTrue(val != 0);
+      }
+
+      // Verify that we succeed in removing the file we created
+      {
+        String[] args = new String[2];
+        args[0] = "-rm";
+        args[1] = "/test/mkdirs/myFile";
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage()); 
+        }
+        assertTrue(val == 0);
+      }
+
+      // Verify touch/test
+      {
+        String[] args = new String[2];
+        args[0] = "-touchz";
+        args[1] = "/test/mkdirs/noFileHere";
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertTrue(val == 0);
+
+        args = new String[3];
+        args[0] = "-test";
+        args[1] = "-e";
+        args[2] = "/test/mkdirs/noFileHere";
+        val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertTrue(val == 0);
+      }
+
+      // Verify that cp from a directory to a subdirectory fails
+      {
+        String[] args = new String[2];
+        args[0] = "-mkdir";
+        args[1] = "/test/dir1";
+        int val = -1;
+        try {
+          val = shell.run(args);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertTrue(val == 0);
+
+        // this should fail
+        String[] args1 = new String[3];
+        args1[0] = "-cp";
+        args1[1] = "/test/dir1";
+        args1[2] = "/test/dir1/dir2";
+        val = 0;
+        try {
+          val = shell.run(args1);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertTrue(val == -1);
+
+        // this should succeed
+        args1[0] = "-cp";
+        args1[1] = "/test/dir1";
+        args1[2] = "/test/dir1foo";
+        val = -1;
+        try {
+          val = shell.run(args1);
+        } catch (Exception e) {
+          System.err.println("Exception raised from DFSShell.run " +
+                             e.getLocalizedMessage());
+        }
+        assertTrue(val == 0);
+      }
+        
+    } finally {
+      try {
+        fileSys.close();
+      } catch (Exception e) {
+        // ignore close failures during cleanup
+      }
+      cluster.shutdown();
+    }
+  }
+
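+  // Returns the on-disk block files for every block currently reported by the
+  // datanodes in the cluster, so that tests can tamper with them directly.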
+  static List<File> getBlockFiles(MiniDFSCluster cluster) throws IOException {
+    List<File> files = new ArrayList<File>();
+    List<DataNode> datanodes = cluster.getDataNodes();
+    Block[][] blocks = cluster.getAllBlockReports();
+    for(int i = 0; i < blocks.length; i++) {
+      FSDataset ds = (FSDataset)datanodes.get(i).getFSDataset();
+      for(Block b : blocks[i]) {
+        files.add(ds.getBlockFile(b));
+      }        
+    }
+    return files;
+  }
+
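+  // Corrupts each block file in place by incrementing its first character;
+  // subsequent checksummed reads of the corresponding blocks should then fail.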
+  static void corrupt(List<File> files) throws IOException {
+    for(File f : files) {
+      StringBuilder content = new StringBuilder(DFSTestUtil.readFile(f));
+      char c = content.charAt(0);
+      content.setCharAt(0, ++c);
+      PrintWriter out = new PrintWriter(f);
+      out.print(content);
+      out.flush();
+      out.close();      
+    }
+  }
+
+  static interface TestGetRunner {
+    String run(int exitcode, String... options) throws IOException;
+  }
+
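+  // Verifies that a permission-denied RemoteException from "-ls", issued by a
+  // user with no access to /foo, is reported on stderr and yields exit code -1.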
+  public void testRemoteException() throws Exception {
+    UnixUserGroupInformation tmpUGI = new UnixUserGroupInformation("tmpname",
+        new String[] {
+        "mygroup"});
+    MiniDFSCluster dfs = null;
+    PrintStream bak = null;
+    try {
+      Configuration conf = new Configuration();
+      dfs = new MiniDFSCluster(conf, 2, true, null);
+      FileSystem fs = dfs.getFileSystem();
+      Path p = new Path("/foo");
+      fs.mkdirs(p);
+      fs.setPermission(p, new FsPermission((short)0700));
+      UnixUserGroupInformation.saveToConf(conf,
+          UnixUserGroupInformation.UGI_PROPERTY_NAME, tmpUGI);
+      FsShell fshell = new FsShell(conf);
+      bak = System.err;
+      ByteArrayOutputStream out = new ByteArrayOutputStream();
+      PrintStream tmp = new PrintStream(out);
+      System.setErr(tmp);
+      String[] args = new String[2];
+      args[0] = "-ls";
+      args[1] = "/foo";
+      int ret = ToolRunner.run(fshell, args);
+      assertTrue("returned should be -1", (ret == -1));
+      String str = out.toString();
+      assertTrue("permission denied printed", str.indexOf("Permission denied") != -1);
+      out.reset();
+    } finally {
+      if (bak != null) {
+        System.setErr(bak);
+      }
+      if (dfs != null) {
+        dfs.shutdown();
+      }
+    }
+  }
+  
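+  // Copies a local file into DFS, fetches it back with "-get" (with and
+  // without -ignoreCrc), then corrupts the block files and verifies that a
+  // plain "-get" fails while "-get -ignoreCrc" returns the corrupted contents.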
+  public void testGet() throws IOException {
+    DFSTestUtil.setLogLevel2All(FSInputChecker.LOG);
+    final Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+
+    try {
+      final String fname = "testGet.txt";
+      final File localf = createLocalFile(new File(TEST_ROOT_DIR, fname));
+      final String localfcontent = DFSTestUtil.readFile(localf);
+      final Path root = mkdir(dfs, new Path("/test/get"));
+      final Path remotef = new Path(root, fname);
+      dfs.copyFromLocalFile(false, false, new Path(localf.getPath()), remotef);
+
+      final FsShell shell = new FsShell();
+      shell.setConf(conf);
+      TestGetRunner runner = new TestGetRunner() {
+        private int count = 0;
+
+        public String run(int exitcode, String... options) throws IOException {
+          String dst = TEST_ROOT_DIR + "/" + fname+ ++count;
+          String[] args = new String[options.length + 3];
+          args[0] = "-get"; 
+          args[args.length - 2] = remotef.toString();
+          args[args.length - 1] = dst;
+          for(int i = 0; i < options.length; i++) {
+            args[i + 1] = options[i];
+          }
+          show("args=" + Arrays.asList(args));
+          
+          try {
+            assertEquals(exitcode, shell.run(args));
+          } catch (Exception e) {
+            assertTrue(StringUtils.stringifyException(e), false); 
+          }
+          return exitcode == 0? DFSTestUtil.readFile(new File(dst)): null; 
+        }
+      };
+
+      assertEquals(localfcontent, runner.run(0));
+      assertEquals(localfcontent, runner.run(0, "-ignoreCrc"));
+
+      //find and modify the block files
+      List<File> files = getBlockFiles(cluster);
+      show("files=" + files);
+      corrupt(files);
+
+      assertEquals(null, runner.run(-1));
+      String corruptedcontent = runner.run(0, "-ignoreCrc");
+      assertEquals(localfcontent.substring(1), corruptedcontent.substring(1));
+      assertEquals(localfcontent.charAt(0)+1, corruptedcontent.charAt(0));
+
+      localf.delete();
+    } finally {
+      try {dfs.close();} catch (Exception e) {}
+      cluster.shutdown();
+    }
+  }
+
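+  // Runs "-lsr" as the current user and again as a user that cannot read the
+  // "sub" directory; the second run should exit with -1 but still list the
+  // entries it can read (e.g. "zzz").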
+  public void testLsr() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+
+    try {
+      final String root = createTree(dfs, "lsr");
+      dfs.mkdirs(new Path(root, "zzz"));
+      
+      runLsr(new FsShell(conf), root, 0);
+      
+      final Path sub = new Path(root, "sub");
+      dfs.setPermission(sub, new FsPermission((short)0));
+
+      final UserGroupInformation ugi = UserGroupInformation.getCurrentUGI();
+      final String tmpusername = ugi.getUserName() + "1";
+      UnixUserGroupInformation tmpUGI = new UnixUserGroupInformation(
+          tmpusername, new String[] {tmpusername});
+      UnixUserGroupInformation.saveToConf(conf,
+            UnixUserGroupInformation.UGI_PROPERTY_NAME, tmpUGI);
+      String results = runLsr(new FsShell(conf), root, -1);
+      assertTrue(results.contains("zzz"));
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  private static String runLsr(final FsShell shell, String root, int returnvalue
+      ) throws Exception {
+    System.out.println("root=" + root + ", returnvalue=" + returnvalue);
+    final ByteArrayOutputStream bytes = new ByteArrayOutputStream(); 
+    final PrintStream out = new PrintStream(bytes);
+    final PrintStream oldOut = System.out;
+    final PrintStream oldErr = System.err;
+    System.setOut(out);
+    System.setErr(out);
+    final String results;
+    try {
+      assertEquals(returnvalue, shell.run(new String[]{"-lsr", root}));
+      results = bytes.toString();
+    } finally {
+      IOUtils.closeStream(out);
+      System.setOut(oldOut);
+      System.setErr(oldErr);
+    }
+    System.out.println("results:\n" + results);
+    return results;
+  }
+}

+ 122 - 0
src/test/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java

@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.ToolRunner;
+
+public class TestDFSShellGenericOptions extends TestCase {
+
+  public void testDFSCommand() throws IOException {
+    String namenode = null;
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      namenode = FileSystem.getDefaultUri(conf).toString();
+      String [] args = new String[4];
+      args[2] = "-mkdir";
+      args[3] = "/data";
+      testFsOption(args, namenode);
+      testConfOption(args, namenode);
+      testPropertyOption(args, namenode);
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+
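+  // In the helpers below, args[0..1] carry the generic option under test
+  // (-fs, -conf or -D) while args[2..3] are always "-mkdir /data".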
+  private void testFsOption(String [] args, String namenode) {        
+    // prepare arguments to create a directory /data
+    args[0] = "-fs";
+    args[1] = namenode;
+    execute(args, namenode);
+  }
+    
+  private void testConfOption(String[] args, String namenode) {
+    // prepare configuration hdfs-site.xml
+    File configDir = new File(new File("build", "test"), "minidfs");
+    assertTrue(configDir.mkdirs());
+    File siteFile = new File(configDir, "hdfs-site.xml");
+    PrintWriter pw;
+    try {
+      pw = new PrintWriter(siteFile);
+      pw.print("<?xml version=\"1.0\"?>\n"+
+               "<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n"+
+               "<configuration>\n"+
+               " <property>\n"+
+               "   <name>fs.default.name</name>\n"+
+               "   <value>"+namenode+"</value>\n"+
+               " </property>\n"+
+               "</configuration>\n");
+      pw.close();
+    
+      // prepare arguments to create a directory /data
+      args[0] = "-conf";
+      args[1] = siteFile.getPath();
+      execute(args, namenode); 
+    } catch (FileNotFoundException e) {
+      e.printStackTrace();
+    } finally {
+      siteFile.delete();
+      configDir.delete();
+    }
+  }
+    
+  private void testPropertyOption(String[] args, String namenode) {
+    // prepare arguments to create a directory /data
+    args[0] = "-D";
+    args[1] = "fs.default.name="+namenode;
+    execute(args, namenode);        
+  }
+    
+  private void execute(String [] args, String namenode) {
+    FsShell shell=new FsShell();
+    FileSystem fs=null;
+    try {
+      ToolRunner.run(shell, args);
+      fs = new DistributedFileSystem(NameNode.getAddress(namenode), 
+                                     shell.getConf());
+      assertTrue("Directory does not get created", 
+                 fs.isDirectory(new Path("/data")));
+      fs.delete(new Path("/data"), true);
+    } catch (Exception e) {
+      System.err.println(e.getMessage());
+      e.printStackTrace();
+    } finally {
+      if (fs!=null) {
+        try {
+          fs.close();
+        } catch (IOException ignored) {
+        }
+      }
+    }
+  }
+
+}

+ 213 - 0
src/test/org/apache/hadoop/hdfs/TestDFSStartupVersions.java

@@ -0,0 +1,213 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
+
+import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * This test ensures the appropriate response (successful or failure) from 
+ * a Datanode when the system is started with differing version combinations. 
+ */
+public class TestDFSStartupVersions extends TestCase {
+  
+  private static final Log LOG = LogFactory.getLog(
+                                                   "org.apache.hadoop.hdfs.TestDFSStartupVersions");
+  private static Path TEST_ROOT_DIR = new Path(
+                                               System.getProperty("test.build.data","/tmp").toString().replace(' ', '+'));
+  private MiniDFSCluster cluster = null;
+  
+  /**
+   * Writes an INFO log message containing the parameters.
+   */
+  void log(String label, NodeType nodeType, Integer testCase, StorageInfo version) {
+    String testCaseLine = "";
+    if (testCase != null) {
+      testCaseLine = " testCase="+testCase;
+    }
+    LOG.info("============================================================");
+    LOG.info("***TEST*** " + label + ":"
+             + testCaseLine
+             + " nodeType="+nodeType
+             + " layoutVersion="+version.getLayoutVersion()
+             + " namespaceID="+version.getNamespaceID()
+             + " fsscTime="+version.getCTime());
+  }
+  
+  /**
+   * Initialize the versions array.  This array stores all combinations 
+   * of cross product:
+   *  {oldLayoutVersion,currentLayoutVersion,futureLayoutVersion} X
+   *    {currentNamespaceId,incorrectNamespaceId} X
+   *      {pastFsscTime,currentFsscTime,futureFsscTime}
+   */
+  private StorageInfo[] initializeVersions() throws Exception {
+    int layoutVersionOld = Storage.LAST_UPGRADABLE_LAYOUT_VERSION;
+    int layoutVersionCur = UpgradeUtilities.getCurrentLayoutVersion();
+    int layoutVersionNew = Integer.MIN_VALUE;
+    int namespaceIdCur = UpgradeUtilities.getCurrentNamespaceID(null);
+    int namespaceIdOld = Integer.MIN_VALUE;
+    long fsscTimeOld = Long.MIN_VALUE;
+    long fsscTimeCur = UpgradeUtilities.getCurrentFsscTime(null);
+    long fsscTimeNew = Long.MAX_VALUE;
+    
+    return new StorageInfo[] {
+      new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeOld), // 0
+      new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeCur), // 1
+      new StorageInfo(layoutVersionOld, namespaceIdCur, fsscTimeNew), // 2
+      new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeOld), // 3
+      new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeCur), // 4
+      new StorageInfo(layoutVersionOld, namespaceIdOld, fsscTimeNew), // 5
+      new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeOld), // 6
+      new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeCur), // 7
+      new StorageInfo(layoutVersionCur, namespaceIdCur, fsscTimeNew), // 8
+      new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeOld), // 9
+      new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeCur), // 10
+      new StorageInfo(layoutVersionCur, namespaceIdOld, fsscTimeNew), // 11
+      new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeOld), // 12
+      new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeCur), // 13
+      new StorageInfo(layoutVersionNew, namespaceIdCur, fsscTimeNew), // 14
+      new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeOld), // 15
+      new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeCur), // 16
+      new StorageInfo(layoutVersionNew, namespaceIdOld, fsscTimeNew), // 17
+    };
+  }
+  
+  /**
+   * Determines if the given Namenode version and Datanode version
+   * are compatible with each other. Compatibility in this case means
+   * that the Namenode and Datanode will successfully start up and
+   * will work together. The rules for compatibility,
+   * taken from the DFS Upgrade Design, are as follows:
+   * <pre>
+   * 1. The data-node does regular startup (no matter which options 
+   *    it is started with) if
+   *       softwareLV == storedLV AND 
+   *       DataNode.FSSCTime == NameNode.FSSCTime
+   * 2. The data-node performs an upgrade if it is started without any 
+   *    options and
+   *       |softwareLV| > |storedLV| OR 
+   *       (softwareLV == storedLV AND
+   *        DataNode.FSSCTime < NameNode.FSSCTime)
+   * 3. NOT TESTED: The data-node rolls back if it is started with
+   *    the -rollback option and
+   *       |softwareLV| >= |previous.storedLV| AND 
+   *       DataNode.previous.FSSCTime <= NameNode.FSSCTime
+   * 4. In all other cases the startup fails.
+   * </pre>
+   */
+  boolean isVersionCompatible(StorageInfo namenodeVer, StorageInfo datanodeVer) {
+    // check #0: namespace IDs must match before any of the rules above apply
+    if (namenodeVer.getNamespaceID() != datanodeVer.getNamespaceID()) {
+      LOG.info("namespaceIDs are not equal: isVersionCompatible=false");
+      return false;
+    }
+    // check #1
+    int softwareLV = FSConstants.LAYOUT_VERSION;  // will also be Namenode's LV
+    int storedLV = datanodeVer.getLayoutVersion();
+    if (softwareLV == storedLV &&  
+        datanodeVer.getCTime() == namenodeVer.getCTime()) 
+      {
+        LOG.info("layoutVersions and cTimes are equal: isVersionCompatible=true");
+        return true;
+      }
+    // check #2
+    long absSoftwareLV = Math.abs((long)softwareLV);
+    long absStoredLV = Math.abs((long)storedLV);
+    if (absSoftwareLV > absStoredLV ||
+        (softwareLV == storedLV &&
+         datanodeVer.getCTime() < namenodeVer.getCTime())) 
+      {
+        LOG.info("softwareLayoutVersion is newer OR namenode cTime is newer: isVersionCompatible=true");
+        return true;
+      }
+    // check #4
+    LOG.info("default case: isVersionCompatible=false");
+    return false;
+  }
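+  // Worked example of rule #2: layout versions are negative, so with a
+  // hypothetical storedLV of -16 and softwareLV of -18, |softwareLV| = 18 is
+  // greater than |storedLV| = 16 and the datanode is expected to upgrade,
+  // i.e. the versions are considered compatible.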
+  
+  /**
+   * This test ensures the appropriate response (successful or failure) from 
+   * a Datanode when the system is started with differing version combinations. 
+   * <pre>
+   * For each 3-tuple in the cross product
+   *   ({oldLayoutVersion,currentLayoutVersion,futureLayoutVersion},
+   *    {currentNamespaceId,incorrectNamespaceId},
+   *    {pastFsscTime,currentFsscTime,futureFsscTime})
+   *      1. Startup Namenode with version file containing 
+   *         (currentLayoutVersion,currentNamespaceId,currentFsscTime)
+   *      2. Attempt to startup Datanode with version file containing 
+   *         this iterations version 3-tuple
+   * </pre>
+   */
+  public void testVersions() throws Exception {
+    UpgradeUtilities.initialize();
+    Configuration conf = UpgradeUtilities.initializeStorageStateConf(1, 
+                                                      new Configuration());
+    StorageInfo[] versions = initializeVersions();
+    UpgradeUtilities.createStorageDirs(
+                                       NAME_NODE, conf.getStrings("dfs.name.dir"), "current");
+    cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
+    StorageInfo nameNodeVersion = new StorageInfo(
+                                                  UpgradeUtilities.getCurrentLayoutVersion(),
+                                                  UpgradeUtilities.getCurrentNamespaceID(cluster),
+                                                  UpgradeUtilities.getCurrentFsscTime(cluster));
+    log("NameNode version info", NAME_NODE, null, nameNodeVersion);
+    for (int i = 0; i < versions.length; i++) {
+      File[] storage = UpgradeUtilities.createStorageDirs(
+                                                          DATA_NODE, conf.getStrings("dfs.data.dir"), "current");
+      log("DataNode version info", DATA_NODE, i, versions[i]);
+      UpgradeUtilities.createVersionFile(DATA_NODE, storage, versions[i]);
+      try {
+        cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
+      } catch (Exception ignore) {
+        // Ignore.  The asserts below will check for problems.
+        // ignore.printStackTrace();
+      }
+      assertTrue(cluster.getNameNode() != null);
+      assertEquals(isVersionCompatible(nameNodeVersion, versions[i]),
+                   cluster.isDataNodeUp());
+      cluster.shutdownDataNodes();
+    }
+  }
+  
+  protected void tearDown() throws Exception {
+    LOG.info("Shutting down MiniDFSCluster");
+    if (cluster != null) cluster.shutdown();
+  }
+  
+  public static void main(String[] args) throws Exception {
+    new TestDFSStartupVersions().testVersions();
+  }
+  
+}
+

+ 249 - 0
src/test/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java

@@ -0,0 +1,249 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.IOException;
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
+
+/**
+* This test ensures the appropriate response (successful or failure) from
+* the system when the system is started under various storage state and
+* version conditions.
+*/
+public class TestDFSStorageStateRecovery extends TestCase {
+ 
+  private static final Log LOG = LogFactory.getLog(
+                                                   "org.apache.hadoop.hdfs.TestDFSStorageStateRecovery");
+  private Configuration conf = null;
+  private int testCounter = 0;
+  private MiniDFSCluster cluster = null;
+  
+  /**
+   * The test case table.  Each row represents a test case.  This table is
+   * taken from the table in Appendix A of the HDFS Upgrade Test Plan
+   * (TestPlan-HdfsUpgrade.html) attached to
+   * http://issues.apache.org/jira/browse/HADOOP-702
+   * The column meanings are:
+   *  0) current directory exists
+   *  1) previous directory exists
+   *  2) previous.tmp directory exists
+   *  3) removed.tmp directory exists
+   *  4) node should recover and startup
+   *  5) current directory should exist after recovery but before startup
+   *  6) previous directory should exist after recovery but before startup
+   */
+  static boolean[][] testCases = new boolean[][] {
+    new boolean[] {true,  false, false, false, true,  true,  false}, // 1
+    new boolean[] {true,  true,  false, false, true,  true,  true }, // 2
+    new boolean[] {true,  false, true,  false, true,  true,  true }, // 3
+    new boolean[] {true,  true,  true,  true,  false, false, false }, // 4
+    new boolean[] {true,  true,  true,  false, false, false, false }, // 4
+    new boolean[] {false, true,  true,  true,  false, false, false }, // 4
+    new boolean[] {false, true,  true,  false, false, false, false }, // 4
+    new boolean[] {false, false, false, false, false, false, false }, // 5
+    new boolean[] {false, true,  false, false, false, false, false }, // 6
+    new boolean[] {false, false, true,  false, true,  true,  false}, // 7
+    new boolean[] {true,  false, false, true,  true,  true,  false}, // 8
+    new boolean[] {true,  true,  false, true,  false, false, false }, // 9
+    new boolean[] {true,  true,  true,  true,  false, false, false }, // 10
+    new boolean[] {true,  false, true,  true,  false, false, false }, // 10
+    new boolean[] {false, true,  true,  true,  false, false, false }, // 10
+    new boolean[] {false, false, true,  true,  false, false, false }, // 10
+    new boolean[] {false, false, false, true,  false, false, false }, // 11
+    new boolean[] {false, true,  false, true,  true,  true,  true }, // 12
+  };
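+  // Example reading of the first row: only the "current" directory exists, so
+  // the node should start up (recover), "current" should still exist after
+  // recovery and no "previous" directory should be present.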
+  
+  /**
+   * Writes an INFO log message containing the parameters. Only
+   * the first 4 elements of the state array are included in the message.
+   */
+  void log(String label, int numDirs, int testCaseNum, boolean[] state) {
+    LOG.info("============================================================");
+    LOG.info("***TEST " + (testCounter++) + "*** " 
+             + label + ":"
+             + " numDirs="+numDirs
+             + " testCase="+testCaseNum
+             + " current="+state[0]
+             + " previous="+state[1]
+             + " previous.tmp="+state[2]
+             + " removed.tmp="+state[3]);
+  }
+  
+  /**
+   * Sets up the storage directories for the given node type, either
+   * dfs.name.dir or dfs.data.dir. For each element in dfs.name.dir or
+   * dfs.data.dir, the subdirectories represented by the first four elements 
+   * of the <code>state</code> array will be created and populated.
+   * See UpgradeUtilities.createStorageDirs().
+   * 
+   * @param nodeType
+   *   the type of node that storage should be created for. Based on this
+   *   parameter either dfs.name.dir or dfs.data.dir is used from the global conf.
+   * @param state
+   *   a row from the testCases table which indicates which directories
+   *   to setup for the node
+   * @return file paths representing either dfs.name.dir or dfs.data.dir
+   *   directories
+   */
+  String[] createStorageState(NodeType nodeType, boolean[] state) throws Exception {
+    String[] baseDirs = (nodeType == NAME_NODE ?
+                         conf.getStrings("dfs.name.dir") :
+                         conf.getStrings("dfs.data.dir"));
+    UpgradeUtilities.createEmptyDirs(baseDirs);
+    if (state[0])  // current
+      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "current");
+    if (state[1])  // previous
+      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous");
+    if (state[2])  // previous.tmp
+      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "previous.tmp");
+    if (state[3])  // removed.tmp
+      UpgradeUtilities.createStorageDirs(nodeType, baseDirs, "removed.tmp");
+    return baseDirs;
+  }
+ 
+  /**
+   * Verify that the current and/or previous exist as indicated by 
+   * the method parameters.  If previous exists, verify that
+   * it hasn't been modified by comparing the checksums of all of its
+   * files with their original checksums.  It is assumed that
+   * the server has recovered.
+   */
+  void checkResult(NodeType nodeType, String[] baseDirs, 
+                   boolean currentShouldExist, boolean previousShouldExist) 
+    throws IOException
+  {
+    switch (nodeType) {
+    case NAME_NODE:
+      if (currentShouldExist) {
+        for (int i = 0; i < baseDirs.length; i++) {
+          assertTrue(new File(baseDirs[i],"current").isDirectory());
+          assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
+          assertTrue(new File(baseDirs[i],"current/edits").isFile());
+          assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
+          assertTrue(new File(baseDirs[i],"current/fstime").isFile());
+        }
+      }
+      break;
+    case DATA_NODE:
+      if (currentShouldExist) {
+        for (int i = 0; i < baseDirs.length; i++) {
+          assertEquals(
+                       UpgradeUtilities.checksumContents(
+                                                         nodeType, new File(baseDirs[i],"current")),
+                       UpgradeUtilities.checksumMasterContents(nodeType));
+        }
+      }
+      break;
+    }
+    if (previousShouldExist) {
+      for (int i = 0; i < baseDirs.length; i++) {
+        assertTrue(new File(baseDirs[i],"previous").isDirectory());
+        assertEquals(
+                     UpgradeUtilities.checksumContents(
+                                                       nodeType, new File(baseDirs[i],"previous")),
+                     UpgradeUtilities.checksumMasterContents(nodeType));
+      }
+    }
+  }
+ 
+  /**
+   * This test iterates over the testCases table and attempts
+   * to startup the NameNode and DataNode normally.
+   */
+  public void testStorageStates() throws Exception {
+    String[] baseDirs;
+    UpgradeUtilities.initialize();
+
+    for (int numDirs = 1; numDirs <= 2; numDirs++) {
+      conf = new Configuration();
+      conf.setInt("dfs.datanode.scan.period.hours", -1);      
+      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
+      for (int i = 0; i < testCases.length; i++) {
+        boolean[] testCase = testCases[i];
+        boolean shouldRecover = testCase[4];
+        boolean curAfterRecover = testCase[5];
+        boolean prevAfterRecover = testCase[6];
+
+        log("NAME_NODE recovery", numDirs, i, testCase);
+        baseDirs = createStorageState(NAME_NODE, testCase);
+        if (shouldRecover) {
+          cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
+          checkResult(NAME_NODE, baseDirs, curAfterRecover, prevAfterRecover);
+          cluster.shutdown();
+        } else {
+          try {
+            cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
+            throw new AssertionError("NameNode should have failed to start");
+          } catch (IOException expected) {
+            // the exception is expected
+            // check that the message says "not formatted" 
+            // when storage directory is empty (case #5)
+            if(!testCases[i][0] && !testCases[i][2] 
+                      && !testCases[i][1] && !testCases[i][3]) {
+              assertTrue(expected.getLocalizedMessage().contains(
+                  "NameNode is not formatted"));
+            }
+          }
+        }
+        
+        log("DATA_NODE recovery", numDirs, i, testCase);
+        createStorageState(NAME_NODE, new boolean[] {true, true, false, false});
+        cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
+        baseDirs = createStorageState(DATA_NODE, testCase);
+        if (!testCase[0] && !testCase[1] && !testCase[2] && !testCase[3]) {
+          // DataNode will create and format current if no directories exist
+          cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
+        } else {
+          if (shouldRecover) {
+            cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
+            checkResult(DATA_NODE, baseDirs, curAfterRecover, prevAfterRecover);
+          } else {
+            try {
+              cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
+              throw new AssertionError("DataNode should have failed to start");
+            } catch (Exception expected) {
+              // expected
+            }
+          }
+        }
+        cluster.shutdown();
+      } // end testCases loop
+    } // end numDirs loop
+  }
+ 
+  protected void tearDown() throws Exception {
+    LOG.info("Shutting down MiniDFSCluster");
+    if (cluster != null) cluster.shutdown();
+  }
+  
+  public static void main(String[] args) throws Exception {
+    new TestDFSStorageStateRecovery().testStorageStates();
+  }
+  
+}
+
+

+ 252 - 0
src/test/org/apache/hadoop/hdfs/TestDFSUpgrade.java

@@ -0,0 +1,252 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hdfs;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+
+/**
+* This test ensures the appropriate response (successful or failure) from
+* the system when the system is upgraded under various storage state and
+* version conditions.
+*/
+public class TestDFSUpgrade extends TestCase {
+ 
+  private static final Log LOG = LogFactory.getLog(
+                                                   "org.apache.hadoop.hdfs.TestDFSUpgrade");
+  private Configuration conf;
+  private int testCounter = 0;
+  private MiniDFSCluster cluster = null;
+    
+  /**
+   * Writes an INFO log message containing the parameters.
+   */
+  void log(String label, int numDirs) {
+    LOG.info("============================================================");
+    LOG.info("***TEST " + (testCounter++) + "*** " 
+             + label + ":"
+             + " numDirs="+numDirs);
+  }
+  
+  /**
+   * Verify that the current and previous directories exist.  Verify that 
+   * previous hasn't been modified by comparing the checksums of all of its
+   * files with their original checksums.  It is assumed that
+   * the server has recovered and upgraded.
+   */
+  void checkResult(NodeType nodeType, String[] baseDirs) throws IOException {
+    switch (nodeType) {
+    case NAME_NODE:
+      for (int i = 0; i < baseDirs.length; i++) {
+        assertTrue(new File(baseDirs[i],"current").isDirectory());
+        assertTrue(new File(baseDirs[i],"current/VERSION").isFile());
+        assertTrue(new File(baseDirs[i],"current/edits").isFile());
+        assertTrue(new File(baseDirs[i],"current/fsimage").isFile());
+        assertTrue(new File(baseDirs[i],"current/fstime").isFile());
+      }
+      break;
+    case DATA_NODE:
+      for (int i = 0; i < baseDirs.length; i++) {
+        assertEquals(
+                     UpgradeUtilities.checksumContents(
+                                                       nodeType, new File(baseDirs[i],"current")),
+                     UpgradeUtilities.checksumMasterContents(nodeType));
+      }
+      break;
+    }
+    for (int i = 0; i < baseDirs.length; i++) {
+      assertTrue(new File(baseDirs[i],"previous").isDirectory());
+      assertEquals(
+                   UpgradeUtilities.checksumContents(
+                                                     nodeType, new File(baseDirs[i],"previous")),
+                   UpgradeUtilities.checksumMasterContents(nodeType));
+    }
+  }
+ 
+  /**
+   * Attempts to start a NameNode with the given operation.  Starting
+   * the NameNode should throw an exception.
+   */
+  void startNameNodeShouldFail(StartupOption operation) {
+    try {
+      cluster = new MiniDFSCluster(conf, 0, operation); // should fail
+      throw new AssertionError("NameNode should have failed to start");
+    } catch (Exception expected) {
+      // expected
+    }
+  }
+  
+  /**
+   * Attempts to start a DataNode with the given operation.  Starting
+   * the DataNode should throw an exception.
+   */
+  void startDataNodeShouldFail(StartupOption operation) {
+    try {
+      cluster.startDataNodes(conf, 1, false, operation, null); // should fail
+      throw new AssertionError("DataNode should have failed to start");
+    } catch (Exception expected) {
+      // expected
+      assertFalse(cluster.isDataNodeUp());
+    }
+  }
+ 
+  /**
+   * This test attempts to upgrade the NameNode and DataNode under
+   * a number of valid and invalid conditions.
+   */
+  public void testUpgrade() throws Exception {
+    File[] baseDirs;
+    UpgradeUtilities.initialize();
+    
+    for (int numDirs = 1; numDirs <= 2; numDirs++) {
+      conf = new Configuration();
+      conf.setInt("dfs.datanode.scan.period.hours", -1);      
+      conf = UpgradeUtilities.initializeStorageStateConf(numDirs, conf);
+      String[] nameNodeDirs = conf.getStrings("dfs.name.dir");
+      String[] dataNodeDirs = conf.getStrings("dfs.data.dir");
+      
+      log("Normal NameNode upgrade", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      checkResult(NAME_NODE, nameNodeDirs);
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("Normal DataNode upgrade", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
+      checkResult(DATA_NODE, dataNodeDirs);
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+      
+      log("NameNode upgrade with existing previous dir", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
+      startNameNodeShouldFail(StartupOption.UPGRADE);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("DataNode upgrade with existing previous dir", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
+      cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
+      checkResult(DATA_NODE, dataNodeDirs);
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+      log("DataNode upgrade with future stored layout version in current", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
+                                         new StorageInfo(Integer.MIN_VALUE,
+                                                         UpgradeUtilities.getCurrentNamespaceID(cluster),
+                                                         UpgradeUtilities.getCurrentFsscTime(cluster)));
+      startDataNodeShouldFail(StartupOption.REGULAR);
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+      
+      log("DataNode upgrade with newer fsscTime in current", numDirs);
+      UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
+      UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
+                                         new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),
+                                                         UpgradeUtilities.getCurrentNamespaceID(cluster),
+                                                         Long.MAX_VALUE));
+      startDataNodeShouldFail(StartupOption.REGULAR);
+      cluster.shutdown();
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      UpgradeUtilities.createEmptyDirs(dataNodeDirs);
+
+      log("NameNode upgrade with no edits file", numDirs);
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      for (File f : baseDirs) { 
+        FileUtil.fullyDelete(new File(f,"edits"));
+      }
+      startNameNodeShouldFail(StartupOption.UPGRADE);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode upgrade with no image file", numDirs);
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      for (File f : baseDirs) { 
+        FileUtil.fullyDelete(new File(f,"fsimage")); 
+      }
+      startNameNodeShouldFail(StartupOption.UPGRADE);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode upgrade with corrupt version file", numDirs);
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      for (File f : baseDirs) { 
+        UpgradeUtilities.corruptFile(new File(f,"VERSION")); 
+      }
+      startNameNodeShouldFail(StartupOption.UPGRADE);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode upgrade with old layout version in current", numDirs);
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
+                                         new StorageInfo(Storage.LAST_UPGRADABLE_LAYOUT_VERSION + 1,
+                                                         UpgradeUtilities.getCurrentNamespaceID(null),
+                                                         UpgradeUtilities.getCurrentFsscTime(null)));
+      startNameNodeShouldFail(StartupOption.UPGRADE);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+      
+      log("NameNode upgrade with future layout version in current", numDirs);
+      baseDirs = UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
+      UpgradeUtilities.createVersionFile(NAME_NODE, baseDirs,
+                                         new StorageInfo(Integer.MIN_VALUE,
+                                                         UpgradeUtilities.getCurrentNamespaceID(null),
+                                                         UpgradeUtilities.getCurrentFsscTime(null)));
+      startNameNodeShouldFail(StartupOption.UPGRADE);
+      UpgradeUtilities.createEmptyDirs(nameNodeDirs);
+    } // end numDir loop
+  }
+ 
+  protected void tearDown() throws Exception {
+    LOG.info("Shutting down MiniDFSCluster");
+    if (cluster != null) cluster.shutdown();
+  }
+    
+  public static void main(String[] args) throws Exception {
+    new TestDFSUpgrade().testUpgrade();
+  }
+  
+}
+
+

+ 203 - 0
src/test/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.net.InetSocketAddress;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.TreeMap;
+import java.util.zip.CRC32;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+/**
+ * This tests upgrading an HDFS cluster from a DFS directory image created by
+ * an earlier version of Hadoop, and verifies the upgraded file system contents
+ * against reference checksums.
+ * 
+ * This test uses the following two files from the src/test/.../dfs directory:
+ *   1) hadoop-version-dfs-dir.tgz : contains DFS directories.
+ *   2) hadoop-dfs-dir.txt : checksums that are compared in this test.
+ * Please read hadoop-dfs-dir.txt for more information.  
+ */
+public class TestDFSUpgradeFromImage extends TestCase {
+  
+  private static final Log LOG = LogFactory.getLog(
+                    "org.apache.hadoop.hdfs.TestDFSUpgradeFromImage");
+  
+  public int numDataNodes = 4;
+  
+  private static class ReferenceFileInfo {
+    String path;
+    long checksum;
+  }
+  
+  LinkedList<ReferenceFileInfo> refList = new LinkedList<ReferenceFileInfo>();
+  Iterator<ReferenceFileInfo> refIter;
+  
+  boolean printChecksum = false;
+  
+  protected void setUp() throws IOException {
+    unpackStorage();
+  }
+
+  public void unpackStorage() throws IOException {
+    String tarFile = System.getProperty("test.cache.data", "build/test/cache") +
+                     "/hadoop-14-dfs-dir.tgz";
+    String dataDir = System.getProperty("test.build.data", "build/test/data");
+    File dfsDir = new File(dataDir, "dfs");
+    if ( dfsDir.exists() && !FileUtil.fullyDelete(dfsDir) ) {
+      throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
+    }
+    FileUtil.unTar(new File(tarFile), new File(dataDir));
+    //Now read the reference info
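+    // Reference file layout, inferred from the parsing below: each
+    // non-comment line carries "<path> <TAB> <CRC32>", and a line reading
+    // "printChecksums" switches the test to printing checksums instead
+    // of verifying them.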
+    
+    BufferedReader reader = new BufferedReader( 
+                        new FileReader(System.getProperty("test.cache.data", "build/test/cache") +
+                                       "/hadoop-dfs-dir.txt"));
+    String line;
+    while ( (line = reader.readLine()) != null ) {
+      
+      line = line.trim();
+      if (line.length() <= 0 || line.startsWith("#")) {
+        continue;
+      }
+      String[] arr = line.split("\\s+\t\\s+");
+      if (arr.length < 1) {
+        continue;
+      }
+      if (arr[0].equals("printChecksums")) {
+        printChecksum = true;
+        break;
+      }
+      if (arr.length < 2) {
+        continue;
+      }
+      ReferenceFileInfo info = new ReferenceFileInfo();
+      info.path = arr[0];
+      info.checksum = Long.parseLong(arr[1]);
+      refList.add(info);
+    }
+    reader.close();
+  }
+
+  private void verifyChecksum(String path, long checksum) throws IOException {
+    if ( refIter == null ) {
+      refIter = refList.iterator();
+    }
+    
+    if ( printChecksum ) {
+      LOG.info("CRC info for reference file : " + path + " \t " + checksum);
+    } else {
+      if ( !refIter.hasNext() ) {
+        throw new IOException("Checking checksum for " + path +
+                              "Not enough elements in the refList");
+      }
+      ReferenceFileInfo info = refIter.next();
+      // The paths are expected to be listed in the same order 
+      // as they are traversed here.
+      assertEquals(info.path, path);
+      assertEquals("Checking checksum for " + path, info.checksum, checksum);
+    }
+  }
+  
+  CRC32 overallChecksum = new CRC32();
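+  // Rolling CRC over every path visited by verifyDir(); it is checked
+  // against the "overallCRC" entry of the reference list at the end of
+  // verifyFileSystem().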
+  
+  private void verifyDir(DFSClient client, String dir) 
+                                           throws IOException {
+    
+    FileStatus[] fileArr = client.listPaths(dir);
+    TreeMap<String, Boolean> fileMap = new TreeMap<String, Boolean>();
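+    // The TreeMap keeps the listing sorted by path, so the traversal below
+    // is deterministic and lines up with the order recorded in the
+    // reference file.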
+    
+    for(FileStatus file : fileArr) {
+      String path = file.getPath().toString();
+      fileMap.put(path, Boolean.valueOf(file.isDir()));
+    }
+    
+    for(Iterator<String> it = fileMap.keySet().iterator(); it.hasNext();) {
+      String path = it.next();
+      boolean isDir = fileMap.get(path);
+      
+      overallChecksum.update(path.getBytes());
+      
+      if ( isDir ) {
+        verifyDir(client, path);
+      } else {
+        // this is not a directory. Checksum the file data.
+        CRC32 fileCRC = new CRC32();
+        FSInputStream in = client.open(path);
+        byte[] buf = new byte[4096];
+        int nRead = 0;
+        while ( (nRead = in.read(buf, 0, buf.length)) > 0 ) {
+          fileCRC.update(buf, 0, nRead);
+        }
+        
+        verifyChecksum(path, fileCRC.getValue());
+      }
+    }
+  }
+  
+  private void verifyFileSystem(DFSClient client) throws IOException {
+  
+    verifyDir(client, "/");
+    
+    verifyChecksum("overallCRC", overallChecksum.getValue());
+    
+    if ( printChecksum ) {
+      throw new IOException("Checksums are written to log as requested. " +
+                            "Throwing this exception to force an error " +
+                            "for this test.");
+    }
+  }
+  
+  public void testUpgradeFromImage() throws IOException {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
+        System.setProperty("test.build.data", "build/test/data");
+      }
+      conf.setInt("dfs.datanode.scan.period.hours", -1); // block scanning off
+      cluster = new MiniDFSCluster(0, conf, numDataNodes, false, true,
+                                   StartupOption.UPGRADE, null);
+      cluster.waitActive();
+      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                                           cluster.getNameNodePort()), conf);
+      //Safemode will be off only after upgrade is complete. Wait for it.
+      while ( dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET) ) {
+        LOG.info("Waiting for SafeMode to be OFF.");
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException ignored) {}
+      }
+
+      verifyFileSystem(dfsClient);
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+}

+ 350 - 0
src/test/org/apache/hadoop/hdfs/TestDataTransferProtocol.java

@@ -0,0 +1,350 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.EOFException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessToken;
+import org.apache.hadoop.util.DataChecksum;
+
+/**
+ * This tests data transfer protocol handling in the Datanode. It sends
+ * various forms of wrong data and verifies that Datanode handles it well.
+ */
+public class TestDataTransferProtocol extends TestCase {
+  
+  private static final Log LOG = LogFactory.getLog(
+                    "org.apache.hadoop.hdfs.TestDataTransferProtocol");
+  
+  DatanodeID datanode;
+  InetSocketAddress dnAddr;
+  ByteArrayOutputStream sendBuf = new ByteArrayOutputStream(128);
+  DataOutputStream sendOut = new DataOutputStream(sendBuf);
+  ByteArrayOutputStream recvBuf = new ByteArrayOutputStream(128);
+  DataOutputStream recvOut = new DataOutputStream(recvBuf);
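+  // sendBuf accumulates the raw request bytes written to the datanode;
+  // recvBuf accumulates the reply bytes the test expects to read back.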
+
+  private void sendRecvData(String testDescription,
+                            boolean eofExpected) throws IOException {
+    /* Opens a socket to the datanode and sends the data in sendBuf.
+     * If there is data in recvBuf, expects to receive matching data
+     *     back from the datanode.
+     * If an EOFException occurs while receiving, it is swallowed only
+     *     when eofExpected is true; otherwise it is rethrown.
+     */
+    
+    Socket sock = null;
+    try {
+      
+      if ( testDescription != null ) {
+        LOG.info("Testing : " + testDescription);
+      }
+      sock = new Socket();
+      sock.connect(dnAddr, HdfsConstants.READ_TIMEOUT);
+      sock.setSoTimeout(HdfsConstants.READ_TIMEOUT);
+      
+      OutputStream out = sock.getOutputStream();
+      // size the receive buffer to match the expected reply recorded in recvBuf
+      byte[] retBuf = new byte[recvBuf.size()];
+      
+      DataInputStream in = new DataInputStream(sock.getInputStream());
+      out.write(sendBuf.toByteArray());
+      try {
+        in.readFully(retBuf);
+      } catch (EOFException eof) {
+        if ( eofExpected ) {
+          LOG.info("Got EOF as expected.");
+          return;
+        }
+        throw eof;
+      }
+      for (int i=0; i<retBuf.length; i++) {
+        System.out.print(retBuf[i]);
+      }
+      System.out.println(":");
+      
+      if (eofExpected) {
+        throw new IOException("Did not recieve IOException when an exception " +
+                              "is expected while reading from " + 
+                              datanode.getName());
+      }
+      
+      byte[] needed = recvBuf.toByteArray();
+      for (int i=0; i<retBuf.length; i++) {
+        System.out.print(retBuf[i]);
+        assertEquals("checking byte[" + i + "]", needed[i], retBuf[i]);
+      }
+    } finally {
+      IOUtils.closeSocket(sock);
+    }
+  }
+  
+  void createFile(FileSystem fs, Path path, int fileLen) throws IOException {
+    byte [] arr = new byte[fileLen];
+    FSDataOutputStream out = fs.create(path);
+    out.write(arr);
+    out.close();
+  }
+  
+  void readFile(FileSystem fs, Path path, int fileLen) throws IOException {
+    byte [] arr = new byte[fileLen];
+    FSDataInputStream in = fs.open(path);
+    in.readFully(arr);
+  }
+  
+  public void testDataTransferProtocol() throws IOException {
+    Random random = new Random();
+    int oneMil = 1024*1024;
+    Path file = new Path("dataprotocol.dat");
+    int numDataNodes = 1;
+    
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.replication", numDataNodes); 
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+    cluster.waitActive();
+    DFSClient dfsClient = new DFSClient(
+                 new InetSocketAddress("localhost", cluster.getNameNodePort()),
+                 conf);                
+    datanode = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
+    dnAddr = NetUtils.createSocketAddr(datanode.getName());
+    FileSystem fileSys = cluster.getFileSystem();
+    
+    int fileLen = Math.min(conf.getInt("dfs.block.size", 4096), 4096);
+    
+    createFile(fileSys, file, fileLen);
+
+    // get the first blockid for the file
+    Block firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
+    long newBlockId = firstBlock.getBlockId() + 1;
+
+    recvBuf.reset();
+    sendBuf.reset();
+    
+    // bad version
+    recvOut.writeShort((short)(DataTransferProtocol.DATA_TRANSFER_VERSION-1));
+    sendOut.writeShort((short)(DataTransferProtocol.DATA_TRANSFER_VERSION-1));
+    sendRecvData("Wrong Version", true);
+
+    // bad ops
+    sendBuf.reset();
+    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
+    sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK - 1);
+    sendRecvData("Wrong Op Code", true);
+    
+    /* Test OP_WRITE_BLOCK */
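+    // The write-block request header is assembled field by field below:
+    // protocol version, opcode, block id, generation stamp, pipeline size,
+    // recovery flag, client id, source-node flag, number of downstream
+    // targets, access token, checksum type and bytes per checksum; the
+    // malformed cases tamper with individual fields of this header.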
+    sendBuf.reset();
+    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
+    sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
+    sendOut.writeLong(newBlockId); // block id
+    sendOut.writeLong(0);          // generation stamp
+    sendOut.writeInt(0);           // targets in pipeline 
+    sendOut.writeBoolean(false);   // recoveryFlag
+    Text.writeString(sendOut, "cl");// clientID
+    sendOut.writeBoolean(false); // no src node info
+    sendOut.writeInt(0);           // number of downstream targets
+    AccessToken.DUMMY_TOKEN.write(sendOut);
+    sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
+    
+    // bad bytes per checksum
+    sendOut.writeInt(-1-random.nextInt(oneMil));
+    recvBuf.reset();
+    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);
+    sendRecvData("wrong bytesPerChecksum while writing", true);
+
+    sendBuf.reset();
+    recvBuf.reset();
+    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
+    sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
+    sendOut.writeLong(newBlockId);
+    sendOut.writeLong(0);          // generation stamp
+    sendOut.writeInt(0);           // targets in pipeline 
+    sendOut.writeBoolean(false);   // recoveryFlag
+    Text.writeString(sendOut, "cl");// clientID
+    sendOut.writeBoolean(false); // no src node info
+
+    // bad number of targets
+    sendOut.writeInt(-1-random.nextInt(oneMil));
+    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);
+    sendRecvData("bad targets len while writing block " + newBlockId, true);
+
+    sendBuf.reset();
+    recvBuf.reset();
+    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
+    sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
+    sendOut.writeLong(++newBlockId);
+    sendOut.writeLong(0);          // generation stamp
+    sendOut.writeInt(0);           // targets in pipeline 
+    sendOut.writeBoolean(false);   // recoveryFlag
+    Text.writeString(sendOut, "cl");// clientID
+    sendOut.writeBoolean(false); // no src node info
+    sendOut.writeInt(0);
+    AccessToken.DUMMY_TOKEN.write(sendOut);
+    sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
+    sendOut.writeInt(512);
+    sendOut.writeInt(4);           // size of packet
+    sendOut.writeLong(0);          // OffsetInBlock
+    sendOut.writeLong(100);        // sequencenumber
+    sendOut.writeBoolean(false);   // lastPacketInBlock
+    
+    // bad data chunk length
+    sendOut.writeInt(-1-random.nextInt(oneMil));
+    Text.writeString(recvOut, ""); // first bad node
+    recvOut.writeLong(100);        // sequencenumber
+    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);
+    sendRecvData("negative DATA_CHUNK len while writing block " + newBlockId, 
+                 true);
+
+    // test for writing a valid zero size block
+    sendBuf.reset();
+    recvBuf.reset();
+    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
+    sendOut.writeByte(DataTransferProtocol.OP_WRITE_BLOCK);
+    sendOut.writeLong(++newBlockId);
+    sendOut.writeLong(0);          // generation stamp
+    sendOut.writeInt(0);           // targets in pipeline 
+    sendOut.writeBoolean(false);   // recoveryFlag
+    Text.writeString(sendOut, "cl");// clientID
+    sendOut.writeBoolean(false); // no src node info
+    sendOut.writeInt(0);
+    AccessToken.DUMMY_TOKEN.write(sendOut);
+    sendOut.writeByte((byte)DataChecksum.CHECKSUM_CRC32);
+    sendOut.writeInt(512);         // bytes per checksum
+    sendOut.writeInt(8);           // size of packet
+    sendOut.writeLong(0);          // OffsetInBlock
+    sendOut.writeLong(100);        // sequencenumber
+    sendOut.writeBoolean(true);    // lastPacketInBlock
+
+    sendOut.writeInt(0);           // chunk length
+    sendOut.writeInt(0);           // zero checksum
+    //ok finally write a block with 0 len
+    Text.writeString(recvOut, ""); // first bad node
+    recvOut.writeLong(100);        // sequencenumber
+    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_SUCCESS);
+    sendRecvData("Writing a zero len block blockid " + newBlockId, false);
+    
+    /* Test OP_READ_BLOCK */
+
+    // bad block id
+    sendBuf.reset();
+    recvBuf.reset();
+    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
+    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    newBlockId = firstBlock.getBlockId()-1;
+    sendOut.writeLong(newBlockId);
+    sendOut.writeLong(firstBlock.getGenerationStamp());
+    sendOut.writeLong(0L);
+    sendOut.writeLong(fileLen);
+    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);
+    Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
+    sendRecvData("Wrong block ID " + newBlockId + " for read", false); 
+
+    // negative block start offset
+    sendBuf.reset();
+    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
+    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    sendOut.writeLong(firstBlock.getBlockId());
+    sendOut.writeLong(firstBlock.getGenerationStamp());
+    sendOut.writeLong(-1L);
+    sendOut.writeLong(fileLen);
+    Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
+    sendRecvData("Negative start-offset for read for block " + 
+                 firstBlock.getBlockId(), false);
+
+    // bad block start offset
+    sendBuf.reset();
+    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
+    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    sendOut.writeLong(firstBlock.getBlockId());
+    sendOut.writeLong(firstBlock.getGenerationStamp());
+    sendOut.writeLong(fileLen);
+    sendOut.writeLong(fileLen);
+    Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
+    sendRecvData("Wrong start-offset for reading block " +
+                 firstBlock.getBlockId(), false);
+    
+    // negative length is ok. Datanode assumes we want to read the whole block.
+    recvBuf.reset();
+    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_SUCCESS);    
+    sendBuf.reset();
+    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
+    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    sendOut.writeLong(firstBlock.getBlockId());
+    sendOut.writeLong(firstBlock.getGenerationStamp());
+    sendOut.writeLong(0);
+    sendOut.writeLong(-1-random.nextInt(oneMil));
+    Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
+    sendRecvData("Negative length for reading block " +
+                 firstBlock.getBlockId(), false);
+    
+    // length is more than size of block.
+    recvBuf.reset();
+    recvOut.writeShort((short)DataTransferProtocol.OP_STATUS_ERROR);    
+    sendBuf.reset();
+    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
+    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    sendOut.writeLong(firstBlock.getBlockId());
+    sendOut.writeLong(firstBlock.getGenerationStamp());
+    sendOut.writeLong(0);
+    sendOut.writeLong(fileLen + 1);
+    Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
+    sendRecvData("Wrong length for reading block " +
+                 firstBlock.getBlockId(), false);
+    
+    //At the end of all this, read the file to make sure that succeeds finally.
+    sendBuf.reset();
+    sendOut.writeShort((short)DataTransferProtocol.DATA_TRANSFER_VERSION);
+    sendOut.writeByte(DataTransferProtocol.OP_READ_BLOCK);
+    sendOut.writeLong(firstBlock.getBlockId());
+    sendOut.writeLong(firstBlock.getGenerationStamp());
+    sendOut.writeLong(0);
+    sendOut.writeLong(fileLen);
+    Text.writeString(sendOut, "cl");
+    AccessToken.DUMMY_TOKEN.write(sendOut);
+    readFile(fileSys, file, fileLen);
+  }
+}

+ 438 - 0
src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -0,0 +1,438 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URL;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.io.*;
+import java.nio.channels.FileChannel;
+import java.util.Random;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+
+import junit.framework.TestCase;
+
+/**
+ * This test verifies that block verification occurs on the datanode
+ */
+public class TestDatanodeBlockScanner extends TestCase {
+  
+  private static final Log LOG = 
+                 LogFactory.getLog(TestDatanodeBlockScanner.class);
+  
+  private static Pattern pattern = 
+             Pattern.compile(".*?(blk_[-]*\\d+).*?scan time\\s*:\\s*(\\d+)");
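+  // The regex above picks the block name and its scan time out of the
+  // block scanner report; an illustrative (not verbatim) report line is
+  //   blk_-1234567890 : status : ok  scan time : 1242345678000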
+  /**
+   * This connects to datanode and fetches block verification data.
+   * It repeats this until the given block has a verification time > 0.
+   */
+  private static long waitForVerification(DatanodeInfo dn, FileSystem fs, 
+                                          Path file) throws IOException {
+    URL url = new URL("http://localhost:" + dn.getInfoPort() +
+                      "/blockScannerReport?listblocks");
+    long lastWarnTime = System.currentTimeMillis();
+    long verificationTime = 0;
+    
+    String block = DFSTestUtil.getFirstBlock(fs, file).getBlockName();
+    
+    while (verificationTime <= 0) {
+      String response = DFSTestUtil.urlGet(url);
+      for(Matcher matcher = pattern.matcher(response); matcher.find();) {
+        if (block.equals(matcher.group(1))) {
+          verificationTime = Long.parseLong(matcher.group(2));
+          break;
+        }
+      }
+      
+      if (verificationTime <= 0) {
+        long now = System.currentTimeMillis();
+        if ((now - lastWarnTime) >= 5*1000) {
+          LOG.info("Waiting for verification of " + block);
+          lastWarnTime = now; 
+        }
+        try {
+          Thread.sleep(500);
+        } catch (InterruptedException ignored) {}
+      }
+    }
+    
+    return verificationTime;
+  }
+
+  public void testDatanodeBlockScanner() throws IOException {
+    
+    long startTime = System.currentTimeMillis();
+    
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    cluster.waitActive();
+    
+    FileSystem fs = cluster.getFileSystem();
+    Path file1 = new Path("/tmp/testBlockVerification/file1");
+    Path file2 = new Path("/tmp/testBlockVerification/file2");
+    
+    /*
+     * Write the first file and restart the cluster.
+     */
+    DFSTestUtil.createFile(fs, file1, 10, (short)1, 0);
+    cluster.shutdown();
+    cluster = new MiniDFSCluster(conf, 1, false, null);
+    cluster.waitActive();
+    
+    DFSClient dfsClient =  new DFSClient(new InetSocketAddress("localhost", 
+                                         cluster.getNameNodePort()), conf);
+    fs = cluster.getFileSystem();
+    DatanodeInfo dn = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
+    
+    /*
+     * The cluster restarted. The block should be verified by now.
+     */
+    assertTrue(waitForVerification(dn, fs, file1) > startTime);
+    
+    /*
+     * Create a new file and read the block. The block should be marked 
+     * verified since the client reads the block and verifies checksum. 
+     */
+    DFSTestUtil.createFile(fs, file2, 10, (short)1, 0);
+    IOUtils.copyBytes(fs.open(file2), new IOUtils.NullOutputStream(), 
+                      conf, true); 
+    assertTrue(waitForVerification(dn, fs, file2) > startTime);
+    
+    cluster.shutdown();
+  }
+
+  public static boolean corruptReplica(String blockName, int replica) throws IOException {
+    Random random = new Random();
+    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
+    boolean corrupted = false;
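+    // The loop below relies on MiniDFSCluster's layout of two storage
+    // directories per datanode (data1/data2 for the first node,
+    // data3/data4 for the second, ...), so replica index i maps to
+    // directories data(2i+1) and data(2i+2).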
+    for (int i=replica*2; i<replica*2+2; i++) {
+      File blockFile = new File(baseDir, "data" + (i+1)+ "/current/" + 
+                               blockName);
+      if (blockFile.exists()) {
+        // Corrupt replica by writing random bytes into replica
+        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+        FileChannel channel = raFile.getChannel();
+        String badString = "BADBAD";
+        int rand = random.nextInt((int)channel.size()/2);
+        raFile.seek(rand);
+        raFile.write(badString.getBytes());
+        raFile.close();
+        corrupted = true;
+      }
+    }
+    return corrupted;
+  }
+
+  public void testBlockCorruptionPolicy() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setLong("dfs.blockreport.intervalMsec", 1000L);
+    Random random = new Random();
+    FileSystem fs = null;
+    DFSClient dfsClient = null;
+    LocatedBlocks blocks = null;
+    int blockCount = 0;
+    int rand = random.nextInt(3);
+
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    Path file1 = new Path("/tmp/testBlockVerification/file1");
+    DFSTestUtil.createFile(fs, file1, 1024, (short)3, 0);
+    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();
+    
+    dfsClient = new DFSClient(new InetSocketAddress("localhost", 
+                                        cluster.getNameNodePort()), conf);
+    do {
+      blocks = dfsClient.namenode.
+                   getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+      blockCount = blocks.get(0).getLocations().length;
+      try {
+        LOG.info("Looping until expected blockCount of 3 is received");
+        Thread.sleep(1000);
+      } catch (InterruptedException ignore) {
+      }
+    } while (blockCount != 3);
+    assertTrue(blocks.get(0).isCorrupt() == false);
+
+    // Corrupt random replica of block 
+    corruptReplica(block, rand);
+
+    // Restart the datanode so that the corrupt replica gets reported to the namenode
+    cluster.restartDataNode(rand);
+
+    // We have 2 good replicas and block is not corrupt
+    do {
+      blocks = dfsClient.namenode.
+                   getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+      blockCount = blocks.get(0).getLocations().length;
+      try {
+        LOG.info("Looping until expected blockCount of 2 is received");
+        Thread.sleep(1000);
+      } catch (InterruptedException ignore) {
+      }
+    } while (blockCount != 2);
+    assertTrue(blocks.get(0).isCorrupt() == false);
+  
+    // Corrupt all replicas. Now, block should be marked as corrupt
+    // and we should get all the replicas 
+    corruptReplica(block, 0);
+    corruptReplica(block, 1);
+    corruptReplica(block, 2);
+
+    // Read the file to trigger reportBadBlocks by client
+    try {
+      IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), 
+                        conf, true);
+    } catch (IOException e) {
+      // Ignore exception
+    }
+
+    // We now have the blocks to be marked as corrupt and we get back all
+    // its replicas
+    do {
+      blocks = dfsClient.namenode.
+                   getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+      blockCount = blocks.get(0).getLocations().length;
+      try {
+        LOG.info("Looping until expected blockCount of 3 is received");
+        Thread.sleep(1000);
+      } catch (InterruptedException ignore) {
+      }
+    } while (blockCount != 3);
+    assertTrue(blocks.get(0).isCorrupt() == true);
+
+    cluster.shutdown();
+  }
+  
+  /**
+   * testBlockCorruptionRecoveryPolicy.
+   * This tests recovery of corrupt replicas, first for one corrupt replica
+   * then for two. The test invokes blockCorruptionRecoveryPolicy which
+   * 1. Creates a block with desired number of replicas
+   * 2. Corrupts the desired number of replicas and restarts the datanodes
+   *    containing the corrupt replica. Additionally we also read the block
+   *    in case restarting does not report corrupt replicas.
+   *    Restarting or reading from the datanode would trigger reportBadBlocks 
+   *    to namenode.
+   *    NameNode adds it to corruptReplicasMap and neededReplication
+   * 3. Test waits until all corrupt replicas are reported, meanwhile
+   *    re-replication brings the block back to a healthy state
+   * 4. Test again waits until the block is reported with expected number
+   *    of good replicas.
+   */
+  public void testBlockCorruptionRecoveryPolicy() throws IOException {
+    // Test recovery of 1 corrupt replica
+    LOG.info("Testing corrupt replica recovery for one corrupt replica");
+    blockCorruptionRecoveryPolicy(4, (short)3, 1);
+
+    // Test recovery of 2 corrupt replicas
+    LOG.info("Testing corrupt replica recovery for two corrupt replicas");
+    blockCorruptionRecoveryPolicy(5, (short)3, 2);
+  }
+  
+  private void blockCorruptionRecoveryPolicy(int numDataNodes, 
+                                             short numReplicas,
+                                             int numCorruptReplicas) 
+                                             throws IOException {
+    Configuration conf = new Configuration();
+    conf.setLong("dfs.blockreport.intervalMsec", 30L);
+    conf.setLong("dfs.replication.interval", 30);
+    conf.setLong("dfs.heartbeat.interval", 30L);
+    conf.setBoolean("dfs.replication.considerLoad", false);
+    FileSystem fs = null;
+    DFSClient dfsClient = null;
+    LocatedBlocks blocks = null;
+    int replicaCount = 0;
+
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    Path file1 = new Path("/tmp/testBlockCorruptRecovery/file");
+    DFSTestUtil.createFile(fs, file1, 1024, numReplicas, 0);
+    Block blk = DFSTestUtil.getFirstBlock(fs, file1);
+    String block = blk.getBlockName();
+    
+    dfsClient = new DFSClient(new InetSocketAddress("localhost", 
+                                        cluster.getNameNodePort()), conf);
+    blocks = dfsClient.namenode.
+               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+    replicaCount = blocks.get(0).getLocations().length;
+
+    // Wait until block is replicated to numReplicas
+    while (replicaCount != numReplicas) {
+      try {
+        LOG.info("Looping until expected replicaCount of " + numReplicas +
+                  "is reached");
+        Thread.sleep(1000);
+      } catch (InterruptedException ignore) {
+      }
+      blocks = dfsClient.namenode.
+                   getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+      replicaCount = blocks.get(0).getLocations().length;
+    }
+    assertTrue(blocks.get(0).isCorrupt() == false);
+
+    // Corrupt numCorruptReplicas replicas of block 
+    int[] corruptReplicasDNIDs = new int[numCorruptReplicas];
+    for (int i=0, j=0; (j != numCorruptReplicas) && (i < numDataNodes); i++) {
+      if (corruptReplica(block, i)) 
+        corruptReplicasDNIDs[j++] = i;
+    }
+    
+    // Restart the datanodes containing corrupt replicas 
+    // so they would be reported to namenode and re-replicated
+    for (int i = 0; i < numCorruptReplicas; i++) 
+      cluster.restartDataNode(corruptReplicasDNIDs[i]);
+
+    // Loop until all corrupt replicas are reported
+    int corruptReplicaSize = cluster.getNamesystem().
+                              numCorruptReplicas(blk);
+    while (corruptReplicaSize != numCorruptReplicas) {
+      try {
+        IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), 
+                          conf, true);
+      } catch (IOException e) {
+      }
+      try {
+        LOG.info("Looping until expected " + numCorruptReplicas + " are " +
+                 "reported. Current reported " + corruptReplicaSize);
+        Thread.sleep(1000);
+      } catch (InterruptedException ignore) {
+      }
+      corruptReplicaSize = cluster.getNamesystem().
+                              numCorruptReplicas(blk);
+    }
+    
+    // Loop until the block recovers after replication
+    blocks = dfsClient.namenode.
+               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+    replicaCount = blocks.get(0).getLocations().length;
+    while (replicaCount != numReplicas) {
+      try {
+        LOG.info("Looping until block gets rereplicated to " + numReplicas);
+        Thread.sleep(1000);
+      } catch (InterruptedException ignore) {
+      }
+      blocks = dfsClient.namenode.
+                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+      replicaCount = blocks.get(0).getLocations().length;
+    }
+
+    // Make sure the corrupt replica is invalidated and removed from
+    // corruptReplicasMap
+    corruptReplicaSize = cluster.getNamesystem().
+                          numCorruptReplicas(blk);
+    while (corruptReplicaSize != 0 || replicaCount != numReplicas) {
+      try {
+        LOG.info("Looping until corrupt replica is invalidated");
+        Thread.sleep(1000);
+      } catch (InterruptedException ignore) {
+      }
+      corruptReplicaSize = cluster.getNamesystem().
+                            numCorruptReplicas(blk);
+      blocks = dfsClient.namenode.
+                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+      replicaCount = blocks.get(0).getLocations().length;
+    }
+    // Make sure block is healthy 
+    assertTrue(corruptReplicaSize == 0);
+    assertTrue(replicaCount == numReplicas);
+    assertTrue(blocks.get(0).isCorrupt() == false);
+    cluster.shutdown();
+  }
+  
+  /** Test if NameNode handles truncated blocks in block report */
+  public void testTruncatedBlockReport() throws Exception {
+    final Configuration conf = new Configuration();
+    final short REPLICATION_FACTOR = (short)2;
+
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
+    cluster.waitActive();
+    FileSystem fs = cluster.getFileSystem();
+    try {
+      final Path fileName = new Path("/file1");
+      DFSTestUtil.createFile(fs, fileName, 1, REPLICATION_FACTOR, 0);
+      DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
+
+      String block = DFSTestUtil.getFirstBlock(fs, fileName).getBlockName();
+
+      // Truncate replica of block
+      changeReplicaLength(block, 0, -1);
+
+      cluster.shutdown();
+
+      // restart the cluster
+      cluster = new MiniDFSCluster(
+          0, conf, REPLICATION_FACTOR, false, true, null, null, null);
+      cluster.startDataNodes(conf, 1, true, null, null);
+      cluster.waitActive();  // now we have 3 datanodes
+
+      // wait for truncated block be detected and the block to be replicated
+      DFSTestUtil.waitReplication(
+          cluster.getFileSystem(), fileName, REPLICATION_FACTOR);
+      
+      // Make sure that truncated block will be deleted
+      waitForBlockDeleted(block, 0);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  /**
+   * Change the length of a block at datanode dnIndex
+   */
+  static boolean changeReplicaLength(String blockName, int dnIndex, int lenDelta) throws IOException {
+    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
+    for (int i=dnIndex*2; i<dnIndex*2+2; i++) {
+      File blockFile = new File(baseDir, "data" + (i+1)+ "/current/" + 
+                               blockName);
+      if (blockFile.exists()) {
+        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+        raFile.setLength(raFile.length()+lenDelta);
+        raFile.close();
+        return true;
+      }
+    }
+    return false;
+  }
+  
+  private static void waitForBlockDeleted(String blockName, int dnIndex) 
+  throws IOException, InterruptedException {
+    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
+    File blockFile1 = new File(baseDir, "data" + (2*dnIndex+1)+ "/current/" + 
+        blockName);
+    File blockFile2 = new File(baseDir, "data" + (2*dnIndex+2)+ "/current/" + 
+        blockName);
+    while (blockFile1.exists() || blockFile2.exists()) {
+      Thread.sleep(100);
+    }
+  }
+}

+ 417 - 0
src/test/org/apache/hadoop/hdfs/TestDatanodeDeath.java

@@ -0,0 +1,417 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.log4j.Level;
+
+/**
+ * This class tests that writes to HDFS complete successfully even when
+ * datanodes in the write pipeline die.
+ */
+public class TestDatanodeDeath extends TestCase {
+  {
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  static final int blockSize = 8192;
+  static final int numBlocks = 2;
+  static final int fileSize = numBlocks * blockSize + 1;
+  static final int numDatanodes = 15;
+  static final short replication = 3;
+
+  int numberOfFiles = 3;
+  int numThreads = 5;
+  Workload[] workload = null;
+
+  //
+  // a worker thread that creates files, writes random data to them, and verifies the contents
+  //
+  static class Workload extends Thread {
+    private short replication;
+    private int numberOfFiles;
+    private int id;
+    private FileSystem fs;
+    private long stamp;
+    private final long myseed;
+
+    Workload(long myseed, FileSystem fs, int threadIndex, int numberOfFiles, 
+             short replication, long stamp) {
+      this.myseed = myseed;
+      id = threadIndex;
+      this.fs = fs;
+      this.numberOfFiles = numberOfFiles;
+      this.replication = replication;
+      this.stamp = stamp;
+    }
+
+    // create a bunch of files. Write to them and then verify.
+    public void run() {
+      System.out.println("Workload starting ");
+      for (int i = 0; i < numberOfFiles; i++) {
+        Path filename = new Path(id + "." + i);
+        try {
+          System.out.println("Workload processing file " + filename);
+          FSDataOutputStream stm = createFile(fs, filename, replication);
+          DFSClient.DFSOutputStream dfstream = (DFSClient.DFSOutputStream)
+                                                 (stm.getWrappedStream());
+          dfstream.setArtificialSlowdown(1000);
+          writeFile(stm, myseed);
+          stm.close();
+          checkFile(fs, filename, replication, numBlocks, fileSize, myseed);
+        } catch (Throwable e) {
+          System.out.println("Workload exception " + e);
+          assertTrue(e.toString(), false);
+        }
+
+        // increment the stamp to indicate that another file is done.
+        synchronized (this) {
+          stamp++;
+        }
+      }
+    }
+
+    public synchronized void resetStamp() {
+      this.stamp = 0;
+    }
+
+    public synchronized long getStamp() {
+      return stamp;
+    }
+  }
+
+  //
+  // creates a file and returns a descriptor for writing to it.
+  //
+  static private FSDataOutputStream createFile(FileSystem fileSys, Path name, short repl)
+    throws IOException {
+    // create and write a file that contains three blocks of data
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            repl, (long)blockSize);
+    return stm;
+  }
+
+  //
+  // writes to file
+  //
+  static private void writeFile(FSDataOutputStream stm, long seed) throws IOException {
+    byte[] buffer = AppendTestUtil.randomBytes(seed, fileSize);
+
+    int mid = fileSize/2;
+    stm.write(buffer, 0, mid);
+    stm.write(buffer, mid, fileSize - mid);
+  }
+
+  //
+  // verify that the data written are sane
+  // 
+  static private void checkFile(FileSystem fileSys, Path name, int repl,
+                         int numblocks, int filesize, long seed)
+    throws IOException {
+    boolean done = false;
+    int attempt = 0;
+
+    long len = fileSys.getFileStatus(name).getLen();
+    assertTrue(name + " should be of size " + filesize +
+               " but found to be of size " + len, 
+               len == filesize);
+
+    // wait till all full blocks are confirmed by the datanodes.
+    while (!done) {
+      attempt++;
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {}
+      done = true;
+      BlockLocation[] locations = fileSys.getFileBlockLocations(
+          fileSys.getFileStatus(name), 0, filesize);
+
+      if (locations.length < numblocks) {
+        if (attempt > 100) {
+          System.out.println("File " + name + " has only " +
+                             locations.length + " blocks, " +
+                             " but is expected to have " + numblocks +
+                             " blocks.");
+        }
+        done = false;
+        continue;
+      }
+      for (int idx = 0; idx < locations.length; idx++) {
+        if (locations[idx].getHosts().length < repl) {
+          if (attempt > 100) {
+            System.out.println("File " + name + " has " +
+                               locations.length + " blocks: " +
+                               " The " + idx + " block has only " +
+                               locations[idx].getHosts().length + 
+                               " replicas but is expected to have " 
+                               + repl + " replicas.");
+          }
+          done = false;
+          break;
+        }
+      }
+    }
+    FSDataInputStream stm = fileSys.open(name);
+    final byte[] expected = AppendTestUtil.randomBytes(seed, fileSize);
+
+    // do a sanity check. Read the file
+    byte[] actual = new byte[filesize];
+    stm.readFully(0, actual);
+    checkData(actual, 0, expected, "Read 1");
+  }
+
+  private static void checkData(byte[] actual, int from, byte[] expected, String message) {
+    for (int idx = 0; idx < actual.length; idx++) {
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+                        expected[from+idx]+" actual "+actual[idx],
+                        actual[idx], expected[from+idx]);
+      actual[idx] = 0;
+    }
+  }
+
+  /**
+   * A class that kills one datanode and recreates a new one. It waits to
+   * ensure that all workers have finished at least one file since the 
+   * last kill of a datanode. This guarantees that all three replicas of
+   * a block do not get killed (otherwise the file will be corrupt and the
+   * test will fail).
+   */
+  class Modify extends Thread {
+    volatile boolean running;
+    MiniDFSCluster cluster;
+    Configuration conf;
+
+    Modify(Configuration conf, MiniDFSCluster cluster) {
+      running = true;
+      this.cluster = cluster;
+      this.conf = conf;
+    }
+
+    public void run() {
+
+      while (running) {
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+          continue;
+        }
+
+        // check if all threads have a new stamp. 
+        // If so, then all workers have finished at least one file
+        // since the last stamp.
+        boolean loop = false;
+        for (int i = 0; i < numThreads; i++) {
+          if (workload[i].getStamp() == 0) {
+            loop = true;
+            break;
+          }
+        }
+        if (loop) {
+          continue;
+        }
+
+        // Now it is guaranteed that there will be at least one valid
+        // replica of a file.
+
+        for (int i = 0; i < replication - 1; i++) {
+          // pick a random datanode to shutdown
+          int victim = AppendTestUtil.nextInt(numDatanodes);
+          try {
+            System.out.println("Stopping datanode " + victim);
+            cluster.restartDataNode(victim);
+            // cluster.startDataNodes(conf, 1, true, null, null);
+          } catch (IOException e) {
+            System.out.println("TestDatanodeDeath Modify exception " + e);
+            assertTrue("TestDatanodeDeath Modify exception " + e, false);
+            running = false;
+          }
+        }
+
+        // set a new stamp for all workers
+        for (int i = 0; i < numThreads; i++) {
+          workload[i].resetStamp();
+        }
+      }
+    }
+
+    // Make the thread exit.
+    void close() {
+      running = false;
+      this.interrupt();
+    }
+  }
+
+  /**
+   * Test that writing to files succeeds even when datanodes in the pipeline
+   * die.
+   */
+  private void complexTest() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setInt("heartbeat.recheck.interval", 2000);
+    conf.setInt("dfs.heartbeat.interval", 2);
+    conf.setInt("dfs.replication.pending.timeout.sec", 2);
+    conf.setInt("dfs.socket.timeout", 5000);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    cluster.waitActive();
+    FileSystem fs = cluster.getFileSystem();
+    Modify modThread = null;
+
+    try {
+      
+      // Create threads and make them run workload concurrently.
+      workload = new Workload[numThreads];
+      for (int i = 0; i < numThreads; i++) {
+        workload[i] = new Workload(AppendTestUtil.nextLong(), fs, i, numberOfFiles, replication, 0);
+        workload[i].start();
+      }
+
+      // Create a thread that kills existing datanodes and creates new ones.
+      modThread = new Modify(conf, cluster);
+      modThread.start();
+
+      // wait for all transactions to get over
+      for (int i = 0; i < numThreads; i++) {
+        try {
+          System.out.println("Waiting for thread " + i + " to complete...");
+          workload[i].join();
+
+          // if most of the threads are done, then stop restarting datanodes.
+          if (i >= numThreads/2) {
+            modThread.close();
+          }
+         
+        } catch (InterruptedException e) {
+          i--;      // retry
+        }
+      }
+    } finally {
+      if (modThread != null) {
+        modThread.close();
+        try {
+          modThread.join();
+        } catch (InterruptedException e) {}
+      }
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Write to one file, then kill one datanode in the pipeline and then
+   * close the file.
+   */
+  private void simpleTest(int datanodeToKill) throws IOException {
+    Configuration conf = new Configuration();
+    conf.setInt("heartbeat.recheck.interval", 2000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    conf.setInt("dfs.replication.pending.timeout.sec", 2);
+    conf.setInt("dfs.socket.timeout", 5000);
+    int myMaxNodes = 5;
+    System.out.println("SimpleTest starting with DataNode to Kill " + 
+                       datanodeToKill);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, myMaxNodes, true, null);
+    cluster.waitActive();
+    FileSystem fs = cluster.getFileSystem();
+    short repl = 3;
+
+    Path filename = new Path("simpletest.dat");
+    try {
+
+      // create a file and write one block of data
+      System.out.println("SimpleTest creating file " + filename);
+      FSDataOutputStream stm = createFile(fs, filename, repl);
+      DFSClient.DFSOutputStream dfstream = (DFSClient.DFSOutputStream)
+                                             (stm.getWrappedStream());
+
+      // these are test settings
+      dfstream.setChunksPerPacket(5);
+      dfstream.setArtificialSlowdown(3000);
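+      // Slow the writer and shrink the packet size so that a datanode can
+      // be stopped while the block is still being written through the
+      // pipeline.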
+
+      final long myseed = AppendTestUtil.nextLong();
+      byte[] buffer = AppendTestUtil.randomBytes(myseed, fileSize);
+      int mid = fileSize/4;
+      stm.write(buffer, 0, mid);
+
+      DatanodeInfo[] targets = dfstream.getPipeline();
+      int count = 5;
+      while (count-- > 0 && targets == null) {
+        try {
+          System.out.println("SimpleTest: Waiting for pipeline to be created.");
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+        }
+        targets = dfstream.getPipeline();
+      }
+
+      if (targets == null) {
+        int victim = AppendTestUtil.nextInt(myMaxNodes);
+        System.out.println("SimpleTest stopping datanode random " + victim);
+        cluster.stopDataNode(victim);
+      } else {
+        int victim = datanodeToKill;
+        System.out.println("SimpleTest stopping datanode " +
+                            targets[victim].getName());
+        cluster.stopDataNode(targets[victim].getName());
+      }
+      System.out.println("SimpleTest stopping datanode complete");
+
+      // write some more data to file, close and verify
+      stm.write(buffer, mid, fileSize - mid);
+      stm.close();
+
+      checkFile(fs, filename, repl, numBlocks, fileSize, myseed);
+    } catch (Throwable e) {
+      System.out.println("Simple Workload exception " + e);
+      e.printStackTrace();
+      assertTrue(e.toString(), false);
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  public void testSimple0() throws IOException {simpleTest(0);}
+
+  public void testSimple1() throws IOException {simpleTest(1);}
+
+  public void testSimple2() throws IOException {simpleTest(2);}
+
+  public void testComplex() throws IOException {complexTest();}
+}

+ 87 - 0
src/test/org/apache/hadoop/hdfs/TestDatanodeReport.java

@@ -0,0 +1,87 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hdfs;
+
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+
+import junit.framework.TestCase;
+import org.apache.hadoop.conf.Configuration;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+
+/**
+ * This test ensures that all types of datanode report work correctly.
+ */
+public class TestDatanodeReport extends TestCase {
+  final static private Configuration conf = new Configuration();
+  final static private int NUM_OF_DATANODES = 4;
+    
+  /**
+   * This test exercises the different types of datanode report.
+   */
+  public void testDatanodeReport() throws Exception {
+    conf.setInt(
+        "heartbeat.recheck.interval", 500); // 0.5s
+    MiniDFSCluster cluster = 
+      new MiniDFSCluster(conf, NUM_OF_DATANODES, true, null);
+    try {
+      //wait until the cluster is up
+      cluster.waitActive();
+
+      InetSocketAddress addr = new InetSocketAddress("localhost",
+          cluster.getNameNodePort());
+      DFSClient client = new DFSClient(addr, conf);
+
+      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
+                   NUM_OF_DATANODES);
+      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
+                   NUM_OF_DATANODES);
+      assertEquals(client.datanodeReport(DatanodeReportType.DEAD).length, 0);
+
+      // bring down one datanode
+      ArrayList<DataNode> datanodes = cluster.getDataNodes();
+      datanodes.remove(datanodes.size()-1).shutdown();
+
+      DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
+      while (nodeInfo.length != 1) {
+        try {
+          Thread.sleep(500);
+        } catch (Exception e) {
+        }
+        nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
+      }
+
+      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
+                   NUM_OF_DATANODES-1);
+      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
+                   NUM_OF_DATANODES);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+ 
+  public static void main(String[] args) throws Exception {
+    new TestDatanodeReport().testDatanodeReport();
+  }
+  
+}
+
+

+ 297 - 0
src/test/org/apache/hadoop/hdfs/TestDecommission.java

@@ -0,0 +1,297 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+
+/**
+ * This class tests the decommissioning of nodes.
+ */
+public class TestDecommission extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 8192;
+  static final int fileSize = 16384;
+  static final int numDatanodes = 6;
+
+
+  Random myrand = new Random();
+  Path hostsFile;
+  Path excludeFile;
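+  // include/exclude host list files; presumably wired to the namenode's
+  // dfs.hosts / dfs.hosts.exclude settings later in this test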
+
+  ArrayList<String> decommissionedNodes = new ArrayList<String>(numDatanodes);
+
+  private enum NodeState {NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED; }
+
+  private void writeConfigFile(FileSystem fs, Path name, ArrayList<String> nodes) 
+    throws IOException {
+
+    // delete if it already exists
+    if (fs.exists(name)) {
+      fs.delete(name, true);
+    }
+
+    FSDataOutputStream stm = fs.create(name);
+    
+    if (nodes != null) {
+      for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
+        String node = it.next();
+        stm.writeBytes(node);
+        stm.writeBytes("\n");
+      }
+    }
+    stm.close();
+  }
+
+  private void writeFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    // create and write a file that contains two blocks of data
+    FSDataOutputStream stm = fileSys.create(name, true, 
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)blockSize);
+    byte[] buffer = new byte[fileSize];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+  
+  
+  private void checkFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    DFSTestUtil.waitReplication(fileSys, name, (short) repl);
+  }
+
+  private void printFileLocations(FileSystem fileSys, Path name)
+  throws IOException {
+    BlockLocation[] locations = fileSys.getFileBlockLocations(
+        fileSys.getFileStatus(name), 0, fileSize);
+    for (int idx = 0; idx < locations.length; idx++) {
+      String[] loc = locations[idx].getHosts();
+      System.out.print("Block[" + idx + "] : ");
+      for (int j = 0; j < loc.length; j++) {
+        System.out.print(loc[j] + " ");
+      }
+      System.out.println("");
+    }
+  }
+
+  /**
+   * For blocks that reside on the nodes that are down, verify that their
+   * replication factor is 1 more than the specified one.
+   */
+  private void checkFile(FileSystem fileSys, Path name, int repl,
+                         String downnode) throws IOException {
+    // need a raw DFS stream so we can query the block locations directly
+    assertTrue("Not HDFS:"+fileSys.getUri(), fileSys instanceof DistributedFileSystem);
+        
+    DFSClient.DFSDataInputStream dis = (DFSClient.DFSDataInputStream) 
+      ((DistributedFileSystem)fileSys).open(name);
+    Collection<LocatedBlock> dinfo = dis.getAllBlocks();
+
+    for (LocatedBlock blk : dinfo) { // for each block
+      int hasdown = 0;
+      DatanodeInfo[] nodes = blk.getLocations();
+      for (int j = 0; j < nodes.length; j++) {     // for each replica
+        if (nodes[j].getName().equals(downnode)) {
+          hasdown++;
+          System.out.println("Block " + blk.getBlock() + " replica " +
+                             nodes[j].getName() + " is decommissioned.");
+        }
+      }
+      System.out.println("Block " + blk.getBlock() + " has " + hasdown +
+                         " decommissioned replica.");
+      assertEquals("Number of replicas for block" + blk.getBlock(),
+                   Math.min(numDatanodes, repl+hasdown), nodes.length);  
+    }
+  }
+  
+  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+
+  private void printDatanodeReport(DatanodeInfo[] info) {
+    System.out.println("-------------------------------------------------");
+    for (int i = 0; i < info.length; i++) {
+      System.out.println(info[i].getDatanodeReport());
+      System.out.println();
+    }
+  }
+
+  /*
+   * decommission one random node.
+   */
+  private String decommissionNode(NameNode namenode,
+                                  Configuration conf,
+                                  DFSClient client, 
+                                  FileSystem localFileSys)
+    throws IOException {
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+
+    //
+    // pick one datanode randomly.
+    //
+    int index = 0;
+    boolean found = false;
+    while (!found) {
+      index = myrand.nextInt(info.length);
+      if (!info[index].isDecommissioned()) {
+        found = true;
+      }
+    }
+    String nodename = info[index].getName();
+    System.out.println("Decommissioning node: " + nodename);
+
+    // write nodename into the exclude file.
+    ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
+    nodes.add(nodename);
+    writeConfigFile(localFileSys, excludeFile, nodes);
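+    // refreshNodes makes the namenode re-read the exclude file and start decommissioning the node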
+    namenode.getNamesystem().refreshNodes(conf);
+    return nodename;
+  }
+
+  /*
+   * Check if node is in the requested state.
+   */
+  private boolean checkNodeState(FileSystem filesys, 
+                                 String node, 
+                                 NodeState state) throws IOException {
+    DistributedFileSystem dfs = (DistributedFileSystem) filesys;
+    boolean done = false;
+    boolean foundNode = false;
+    DatanodeInfo[] datanodes = dfs.getDataNodeStats();
+    for (int i = 0; i < datanodes.length; i++) {
+      DatanodeInfo dn = datanodes[i];
+      if (dn.getName().equals(node)) {
+        if (state == NodeState.DECOMMISSIONED) {
+          done = dn.isDecommissioned();
+        } else if (state == NodeState.DECOMMISSION_INPROGRESS) {
+          done = dn.isDecommissionInProgress();
+        } else {
+          done = (!dn.isDecommissionInProgress() && !dn.isDecommissioned());
+        }
+        System.out.println(dn.getDatanodeReport());
+        foundNode = true;
+      }
+    }
+    if (!foundNode) {
+      throw new IOException("Could not find node: " + node);
+    }
+    return done;
+  }
+
+  /* 
+   * Wait till the node reaches the requested state.
+   */
+  private void waitNodeState(FileSystem filesys,
+                             String node,
+                             NodeState state) throws IOException {
+    boolean done = checkNodeState(filesys, node, state);
+    while (!done) {
+      System.out.println("Waiting for node " + node +
+                         " to change state to " + state);
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+        // nothing
+      }
+      done = checkNodeState(filesys, node, state);
+    }
+  }
+  
+  /**
+   * Tests Decommission in DFS.
+   */
+  public void testDecommission() throws IOException {
+    Configuration conf = new Configuration();
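+    // do not factor datanode load into replica placement for this test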
+    conf.setBoolean("dfs.replication.considerLoad", false);
+
+    // Set up the hosts/exclude files.
+    FileSystem localFileSys = FileSystem.getLocal(conf);
+    Path workingDir = localFileSys.getWorkingDirectory();
+    Path dir = new Path(workingDir, "build/test/data/work-dir/decommission");
+    assertTrue(localFileSys.mkdirs(dir));
+    hostsFile = new Path(dir, "hosts");
+    excludeFile = new Path(dir, "exclude");
+    conf.set("dfs.hosts.exclude", excludeFile.toUri().getPath());
+    conf.setInt("heartbeat.recheck.interval", 2000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    conf.setInt("dfs.replication.pending.timeout.sec", 4);
+    writeConfigFile(localFileSys, excludeFile, null);
+
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    cluster.waitActive();
+    InetSocketAddress addr = new InetSocketAddress("localhost", 
+                                                   cluster.getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+    assertEquals("Number of Datanodes ", numDatanodes, info.length);
+    FileSystem fileSys = cluster.getFileSystem();
+
+    try {
+      for (int iteration = 0; iteration < numDatanodes - 1; iteration++) {
+        int replicas = numDatanodes - iteration - 1;
+        //
+        // Decommission one node. Verify that node is decommissioned.
+        // 
+        Path file1 = new Path("decommission.dat");
+        writeFile(fileSys, file1, replicas);
+        System.out.println("Created file decommission.dat with " +
+                           replicas + " replicas.");
+        checkFile(fileSys, file1, replicas);
+        printFileLocations(fileSys, file1);
+        String downnode = decommissionNode(cluster.getNameNode(), conf,
+                                           client, localFileSys);
+        decommissionedNodes.add(downnode);
+        waitNodeState(fileSys, downnode, NodeState.DECOMMISSIONED);
+        checkFile(fileSys, file1, replicas, downnode);
+        cleanupFile(fileSys, file1);
+        cleanupFile(localFileSys, dir);
+      }
+    } catch (IOException e) {
+      info = client.datanodeReport(DatanodeReportType.ALL);
+      printDatanodeReport(info);
+      throw e;
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+}

+ 60 - 0
src/test/org/apache/hadoop/hdfs/TestDefaultNameNodePort.java

@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.net.InetSocketAddress;
+import java.net.URI;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+
+/** Test NameNode port defaulting code. */
+public class TestDefaultNameNodePort extends TestCase {
+
+  public void testGetAddressFromString() throws Exception {
+    assertEquals(NameNode.getAddress("foo").getPort(),
+                 NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress("hdfs://foo/").getPort(),
+                 NameNode.DEFAULT_PORT);
+    assertEquals(NameNode.getAddress("hdfs://foo:555").getPort(),
+                 555);
+    assertEquals(NameNode.getAddress("foo:555").getPort(),
+                 555);
+  }
+
+  public void testGetAddressFromConf() throws Exception {
+    Configuration conf = new Configuration();
+    FileSystem.setDefaultUri(conf, "hdfs://foo/");
+    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+    FileSystem.setDefaultUri(conf, "hdfs://foo:555/");
+    assertEquals(NameNode.getAddress(conf).getPort(), 555);
+    FileSystem.setDefaultUri(conf, "foo");
+    assertEquals(NameNode.getAddress(conf).getPort(), NameNode.DEFAULT_PORT);
+  }
+
+  public void testGetUri() {
+    assertEquals(NameNode.getUri(new InetSocketAddress("foo", 555)),
+                 URI.create("hdfs://foo:555"));
+    assertEquals(NameNode.getUri(new InetSocketAddress("foo",
+                                                       NameNode.DEFAULT_PORT)),
+                 URI.create("hdfs://foo"));
+  }
+}

+ 232 - 0
src/test/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -0,0 +1,232 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.URI;
+import java.util.Random;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.log4j.Level;
+
+public class TestDistributedFileSystem extends junit.framework.TestCase {
+  private static final Random RAN = new Random();
+
+  public void testFileSystemCloseAll() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 0, true, null);
+    URI address = FileSystem.getDefaultUri(conf);
+
+    try {
+      FileSystem.closeAll();
+
+      conf = new Configuration();
+      FileSystem.setDefaultUri(conf, address);
+      FileSystem.get(conf);
+      FileSystem.get(conf);
+      FileSystem.closeAll();
+    }
+    finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+  
+  /**
+   * Tests that DFSClient.close does not throw a ConcurrentModificationException
+   * when multiple files are open.
+   */
+  public void testDFSClose() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    FileSystem fileSys = cluster.getFileSystem();
+
+    try {
+      // create two files
+      fileSys.create(new Path("/test/dfsclose/file-0"));
+      fileSys.create(new Path("/test/dfsclose/file-1"));
+
+      fileSys.close();
+    }
+    finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+
+  public void testDFSClient() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+
+    try {
+      cluster = new MiniDFSCluster(conf, 2, true, null);
+      final Path filepath = new Path("/test/LeaseChecker/foo");
+      final long millis = System.currentTimeMillis();
+
+      {
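+        // the lease checker thread should not start until the client creates a file for writing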
+        DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+        assertFalse(dfs.dfs.isLeaseCheckerStarted());
+  
+        //create a file
+        FSDataOutputStream out = dfs.create(filepath);
+        assertTrue(dfs.dfs.isLeaseCheckerStarted());
+  
+        //write something and close
+        out.writeLong(millis);
+        assertTrue(dfs.dfs.isLeaseCheckerStarted());
+        out.close();
+        assertTrue(dfs.dfs.isLeaseCheckerStarted());
+        dfs.close();
+      }
+
+      {
+        // Check to see if opening a non-existent file triggers a FNF
+        FileSystem fs = cluster.getFileSystem();
+        Path dir = new Path("/wrwelkj");
+        assertFalse("File should not exist for test.", fs.exists(dir));
+
+        try {
+          FSDataInputStream in = fs.open(dir);
+          try {
+            in.close();
+            fs.close();
+          } finally {
+            assertTrue("Did not get a FileNotFoundException for non-existing" +
+                " file.", false);
+          }
+        } catch (FileNotFoundException fnf) {
+          // This is the proper exception to catch; move on.
+        }
+
+      }
+
+      {
+        DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+        assertFalse(dfs.dfs.isLeaseCheckerStarted());
+
+        //open and check the file
+        FSDataInputStream in = dfs.open(filepath);
+        assertFalse(dfs.dfs.isLeaseCheckerStarted());
+        assertEquals(millis, in.readLong());
+        assertFalse(dfs.dfs.isLeaseCheckerStarted());
+        in.close();
+        assertFalse(dfs.dfs.isLeaseCheckerStarted());
+        dfs.close();
+      }
+      
+      { // test accessing DFS with ip address. should work with any hostname
+        // alias or ip address that points to the interface that NameNode
+        // is listening on. In this case, it is localhost.
+        String uri = "hdfs://127.0.0.1:" + cluster.getNameNodePort() + 
+                      "/test/ipAddress/file";
+        Path path = new Path(uri);
+        FileSystem fs = FileSystem.get(path.toUri(), conf);
+        FSDataOutputStream out = fs.create(path);
+        byte[] buf = new byte[1024];
+        out.write(buf);
+        out.close();
+        
+        FSDataInputStream in = fs.open(path);
+        in.readFully(buf);
+        in.close();
+        fs.close();
+      }
+    }
+    finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+  
+  public void testFileChecksum() throws IOException {
+    ((Log4JLogger)HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);
+
+    final long seed = RAN.nextLong();
+    System.out.println("seed=" + seed);
+    RAN.setSeed(seed);
+
+    final Configuration conf = new Configuration();
+    conf.set("slave.host.name", "localhost");
+
+    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    final FileSystem hdfs = cluster.getFileSystem();
+    final String hftpuri = "hftp://" + conf.get("dfs.http.address");
+    System.out.println("hftpuri=" + hftpuri);
+    final FileSystem hftp = new Path(hftpuri).getFileSystem(conf);
+
+    final String dir = "/filechecksum";
+    final int block_size = 1024;
+    final int buffer_size = conf.getInt("io.file.buffer.size", 4096);
+    conf.setInt("io.bytes.per.checksum", 512);
+
+    //try different number of blocks
+    for(int n = 0; n < 5; n++) {
+      //generate random data
+      final byte[] data = new byte[RAN.nextInt(block_size/2-1)+n*block_size+1];
+      RAN.nextBytes(data);
+      System.out.println("data.length=" + data.length);
+  
+      //write data to a file
+      final Path foo = new Path(dir, "foo" + n);
+      {
+        final FSDataOutputStream out = hdfs.create(foo, false, buffer_size,
+            (short)2, block_size);
+        out.write(data);
+        out.close();
+      }
+      
+      //compute checksum
+      final FileChecksum hdfsfoocs = hdfs.getFileChecksum(foo);
+      System.out.println("hdfsfoocs=" + hdfsfoocs);
+      
+      final FileChecksum hftpfoocs = hftp.getFileChecksum(foo);
+      System.out.println("hftpfoocs=" + hftpfoocs);
+
+      final Path qualified = new Path(hftpuri + dir, "foo" + n);
+      final FileChecksum qfoocs = hftp.getFileChecksum(qualified);
+      System.out.println("qfoocs=" + qfoocs);
+
+      //write another file
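+      // bar contains the same bytes as foo, so the checksums verified below must match foo's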
+      final Path bar = new Path(dir, "bar" + n);
+      {
+        final FSDataOutputStream out = hdfs.create(bar, false, buffer_size,
+            (short)2, block_size);
+        out.write(data);
+        out.close();
+      }
+  
+      { //verify checksum
+        final FileChecksum barcs = hdfs.getFileChecksum(bar);
+        final int barhashcode = barcs.hashCode();
+        assertEquals(hdfsfoocs.hashCode(), barhashcode);
+        assertEquals(hdfsfoocs, barcs);
+
+        assertEquals(hftpfoocs.hashCode(), barhashcode);
+        assertEquals(hftpfoocs, barcs);
+
+        assertEquals(qfoocs.hashCode(), barhashcode);
+        assertEquals(qfoocs, barcs);
+      }
+    }
+  }
+}

+ 348 - 0
src/test/org/apache/hadoop/hdfs/TestFSInputChecker.java

@@ -0,0 +1,348 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.RandomAccessFile;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+
+/**
+ * This class tests if FSInputChecker works correctly.
+ */
+public class TestFSInputChecker extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int BYTES_PER_SUM = 10;
+  static final int BLOCK_SIZE = 2*BYTES_PER_SUM;
+  static final int HALF_CHUNK_SIZE = BYTES_PER_SUM/2;
+  static final int FILE_SIZE = 2*BLOCK_SIZE-1;
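+  // FILE_SIZE is one byte short of two full blocks, so the last checksum chunk is partial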
+  static final short NUM_OF_DATANODES = 2;
+  byte[] expected = new byte[FILE_SIZE];
+  byte[] actual;
+  FSDataInputStream stm;
+  Random rand = new Random(seed);
+
+  /* create a file */
+  private void writeFile(FileSystem fileSys, Path name) throws IOException {
+    // create and write a file that spans two blocks of data
+    FSDataOutputStream stm = fileSys.create(name, new FsPermission((short)0777),
+        true, fileSys.getConf().getInt("io.file.buffer.size", 4096),
+        NUM_OF_DATANODES, BLOCK_SIZE, null);
+    stm.write(expected);
+    stm.close();
+  }
+  
+  /*validate data*/
+  private void checkAndEraseData(byte[] actual, int from, byte[] expected, 
+      String message) throws Exception {
+    for (int idx = 0; idx < actual.length; idx++) {
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+                        expected[from+idx]+" actual "+actual[idx],
+                        actual[idx], expected[from+idx]);
+      actual[idx] = 0;
+    }
+  }
+  
+  /* test read and getPos */
+  private void checkReadAndGetPos() throws Exception {
+    actual = new byte[FILE_SIZE];
+    // test reads that do not cross checksum boundary
+    stm.seek(0);
+    int offset;
+    for(offset=0; offset<BLOCK_SIZE+BYTES_PER_SUM;
+                  offset += BYTES_PER_SUM ) {
+      assertEquals(stm.getPos(), offset);
+      stm.readFully(actual, offset, BYTES_PER_SUM);
+    }
+    stm.readFully(actual, offset, FILE_SIZE-BLOCK_SIZE-BYTES_PER_SUM);
+    assertEquals(stm.getPos(), FILE_SIZE);
+    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
+    
+    // test reads that cross checksum boundary
+    stm.seek(0L);
+    assertEquals(stm.getPos(), 0L);
+    stm.readFully(actual, 0, HALF_CHUNK_SIZE);
+    assertEquals(stm.getPos(), HALF_CHUNK_SIZE);
+    stm.readFully(actual, HALF_CHUNK_SIZE, BLOCK_SIZE-HALF_CHUNK_SIZE);
+    assertEquals(stm.getPos(), BLOCK_SIZE);
+    stm.readFully(actual, BLOCK_SIZE, BYTES_PER_SUM+HALF_CHUNK_SIZE);
+    assertEquals(stm.getPos(), BLOCK_SIZE+BYTES_PER_SUM+HALF_CHUNK_SIZE);
+    stm.readFully(actual, 2*BLOCK_SIZE-HALF_CHUNK_SIZE, 
+        FILE_SIZE-(2*BLOCK_SIZE-HALF_CHUNK_SIZE));
+    assertEquals(stm.getPos(), FILE_SIZE);
+    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
+    
+    // test read that cross block boundary
+    stm.seek(0L);
+    stm.readFully(actual, 0, BYTES_PER_SUM+HALF_CHUNK_SIZE);
+    assertEquals(stm.getPos(), BYTES_PER_SUM+HALF_CHUNK_SIZE);
+    stm.readFully(actual, BYTES_PER_SUM+HALF_CHUNK_SIZE, BYTES_PER_SUM);
+    assertEquals(stm.getPos(), BLOCK_SIZE+HALF_CHUNK_SIZE);
+    stm.readFully(actual, BLOCK_SIZE+HALF_CHUNK_SIZE,
+        FILE_SIZE-BLOCK_SIZE-HALF_CHUNK_SIZE);
+    assertEquals(stm.getPos(), FILE_SIZE);
+    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
+  }
+  
+  /* test if one seek is correct */
+  private void testSeek1(int offset) 
+  throws Exception {
+    stm.seek(offset);
+    assertEquals(offset, stm.getPos());
+    stm.readFully(actual);
+    checkAndEraseData(actual, offset, expected, "Read Sanity Test");
+  }
+
+  /* test seek() */
+  private void checkSeek( ) throws Exception {
+    actual = new byte[HALF_CHUNK_SIZE];
+    
+    // test seeks to checksum boundary
+    testSeek1(0);
+    testSeek1(BYTES_PER_SUM);
+    testSeek1(BLOCK_SIZE);
+    
+    // test seek to non-checksum-boundary pos
+    testSeek1(BLOCK_SIZE+HALF_CHUNK_SIZE);
+    testSeek1(HALF_CHUNK_SIZE);
+    
+    // test seek to a position at the same checksum chunk
+    testSeek1(HALF_CHUNK_SIZE/2);
+    testSeek1(HALF_CHUNK_SIZE*3/2);
+    
+    // test end of file
+    actual = new byte[1];
+    testSeek1(FILE_SIZE-1);
+    
+    String errMsg = null;
+    try {
+      stm.seek(FILE_SIZE);
+    } catch (IOException e) {
+      errMsg = e.getMessage();
+    }
+    assertTrue(errMsg==null);
+  }
+
+  /* test if one skip is correct */
+  private void testSkip1(int skippedBytes) 
+  throws Exception {
+    long oldPos = stm.getPos();
+    long nSkipped = stm.skip(skippedBytes);
+    long newPos = oldPos+nSkipped;
+    assertEquals(stm.getPos(), newPos);
+    stm.readFully(actual);
+    checkAndEraseData(actual, (int)newPos, expected, "Read Sanity Test");
+  }
+
+  /* test skip() */
+  private void checkSkip( ) throws Exception {
+    actual = new byte[HALF_CHUNK_SIZE];
+    
+    // test skip to a checksum boundary
+    stm.seek(0);
+    testSkip1(BYTES_PER_SUM);
+    testSkip1(HALF_CHUNK_SIZE);
+    testSkip1(HALF_CHUNK_SIZE);
+    
+    // test skip to non-checksum-boundary pos
+    stm.seek(0);
+    testSkip1(HALF_CHUNK_SIZE + 1);
+    testSkip1(BYTES_PER_SUM);
+    testSkip1(HALF_CHUNK_SIZE);
+    
+    // test skip to a position at the same checksum chunk
+    stm.seek(0);
+    testSkip1(1);
+    testSkip1(1);
+    
+    // test skip to end of file
+    stm.seek(0);
+    actual = new byte[1];
+    testSkip1(FILE_SIZE-1);
+    
+    stm.seek(0);
+    assertEquals(stm.skip(FILE_SIZE), FILE_SIZE);
+    assertEquals(stm.skip(10), 0);
+    
+    stm.seek(0);
+    assertEquals(stm.skip(FILE_SIZE+10), FILE_SIZE);
+    stm.seek(10);
+    assertEquals(stm.skip(FILE_SIZE), FILE_SIZE-10);
+  }
+
+  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+  
+  /**
+   * Tests read/seek/getPos/skip operations for the input stream.
+   */
+  private void testChecker(FileSystem fileSys, boolean readCS)
+  throws Exception {
+    Path file = new Path("try.dat");
+    writeFile(fileSys, file);
+    stm = fileSys.open(file);
+    checkReadAndGetPos();
+    checkSeek();
+    checkSkip();
+    // check that mark/reset is not supported
+    assertFalse(stm.markSupported());
+    stm.close();
+    cleanupFile(fileSys, file);
+  }
+  
+  private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
+    // create a file and verify that checksum corruption results in 
+    // a checksum exception on LocalFS
+    
+    String dir = System.getProperty("test.build.data", ".");
+    Path file = new Path(dir + "/corruption-test.dat");
+    Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
+    
+    writeFile(fileSys, file);
+    
+    int fileLen = (int)fileSys.getFileStatus(file).getLen();
+    
+    byte [] buf = new byte[fileLen];
+
+    InputStream in = fileSys.open(file);
+    IOUtils.readFully(in, buf, 0, buf.length);
+    in.close();
+    
+    // check .crc corruption
+    checkFileCorruption(fileSys, file, crcFile);
+    fileSys.delete(file, true);
+    
+    writeFile(fileSys, file);
+    
+    // check data corruption
+    checkFileCorruption(fileSys, file, file);
+    
+    fileSys.delete(file, true);
+  }
+  
+  private void checkFileCorruption(LocalFileSystem fileSys, Path file, 
+                                   Path fileToCorrupt) throws IOException {
+    
+    // corrupt the file 
+    RandomAccessFile out = 
+      new RandomAccessFile(new File(fileToCorrupt.toString()), "rw");
+    
+    byte[] buf = new byte[(int)fileSys.getFileStatus(file).getLen()];    
+    int corruptFileLen = (int)fileSys.getFileStatus(fileToCorrupt).getLen();
+    assertTrue(buf.length >= corruptFileLen);
+    
+    rand.nextBytes(buf);
+    out.seek(corruptFileLen/2);
+    out.write(buf, 0, corruptFileLen/4);
+    out.close();
+
+    boolean gotException = false;
+    
+    InputStream in = fileSys.open(file);
+    try {
+      IOUtils.readFully(in, buf, 0, buf.length);
+    } catch (ChecksumException e) {
+      gotException = true;
+    }
+    assertTrue(gotException);
+    in.close();    
+  }
+  
+  public void testFSInputChecker() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setLong("dfs.block.size", BLOCK_SIZE);
+    conf.setInt("io.bytes.per.checksum", BYTES_PER_SUM);
+    rand.nextBytes(expected);
+
+    // test DFS
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fileSys = cluster.getFileSystem();
+    try {
+      testChecker(fileSys, true);
+      testChecker(fileSys, false);
+      testSeekAndRead(fileSys);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+    
+    
+    // test Local FS
+    fileSys = FileSystem.getLocal(conf);
+    try {
+      testChecker(fileSys, true);
+      testChecker(fileSys, false);
+      testFileCorruption((LocalFileSystem)fileSys);
+      testSeekAndRead(fileSys);
+    }finally {
+      fileSys.close();
+    }
+  }
+
+  private void testSeekAndRead(FileSystem fileSys)
+  throws IOException {
+    Path file = new Path("try.dat");
+    writeFile(fileSys, file);
+    stm = fileSys.open(file,
+        fileSys.getConf().getInt("io.file.buffer.size", 4096));
+    checkSeekAndRead();
+    stm.close();
+    cleanupFile(fileSys, file);
+  }
+
+  private void checkSeekAndRead() throws IOException {
+    int position = 1;
+    int len = 2 * BYTES_PER_SUM - position;
+    readAndCompare(stm, position, len);
+
+    position = BYTES_PER_SUM;
+    len = BYTES_PER_SUM;
+    readAndCompare(stm, position, len);
+  }
+
+  private void readAndCompare(FSDataInputStream in, int position, int len)
+      throws IOException {
+    byte[] b = new byte[len];
+    in.seek(position);
+    IOUtils.readFully(in, b, 0, b.length);
+
+    for (int i = 0; i < b.length; i++) {
+      assertEquals(expected[position + i], b[i]);
+    }
+  }
+}

+ 131 - 0
src/test/org/apache/hadoop/hdfs/TestFSOutputSummer.java

@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Random;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * This class tests if FSOutputSummer works correctly.
+ */
+public class TestFSOutputSummer extends TestCase {
+  private static final long seed = 0xDEADBEEFL;
+  private static final int BYTES_PER_CHECKSUM = 10;
+  private static final int BLOCK_SIZE = 2*BYTES_PER_CHECKSUM;
+  private static final int HALF_CHUNK_SIZE = BYTES_PER_CHECKSUM/2;
+  private static final int FILE_SIZE = 2*BLOCK_SIZE-1;
+  private static final short NUM_OF_DATANODES = 2;
+  private byte[] expected = new byte[FILE_SIZE];
+  private byte[] actual = new byte[FILE_SIZE];
+  private FileSystem fileSys;
+
+  /* create a file, write all data at once */
+  private void writeFile1(Path name) throws Exception {
+    FSDataOutputStream stm = fileSys.create(name, true, 
+               fileSys.getConf().getInt("io.file.buffer.size", 4096),
+               NUM_OF_DATANODES, BLOCK_SIZE);
+    stm.write(expected);
+    stm.close();
+    checkFile(name);
+    cleanupFile(name);
+  }
+  
+  /* create a file, write data chunk by chunk */
+  private void writeFile2(Path name) throws Exception {
+    FSDataOutputStream stm = fileSys.create(name, true, 
+               fileSys.getConf().getInt("io.file.buffer.size", 4096),
+               NUM_OF_DATANODES, BLOCK_SIZE);
+    int i=0;
+    for( ;i<FILE_SIZE-BYTES_PER_CHECKSUM; i+=BYTES_PER_CHECKSUM) {
+      stm.write(expected, i, BYTES_PER_CHECKSUM);
+    }
+    stm.write(expected, i, FILE_SIZE-3*BYTES_PER_CHECKSUM);
+    stm.close();
+    checkFile(name);
+    cleanupFile(name);
+  }
+  
+  /* create a file, write data in variable-sized chunks */
+  private void writeFile3(Path name) throws Exception {
+    FSDataOutputStream stm = fileSys.create(name, true, 
+        fileSys.getConf().getInt("io.file.buffer.size", 4096),
+        NUM_OF_DATANODES, BLOCK_SIZE);
+    stm.write(expected, 0, HALF_CHUNK_SIZE);
+    stm.write(expected, HALF_CHUNK_SIZE, BYTES_PER_CHECKSUM+2);
+    stm.write(expected, HALF_CHUNK_SIZE+BYTES_PER_CHECKSUM+2, 2);
+    stm.write(expected, HALF_CHUNK_SIZE+BYTES_PER_CHECKSUM+4, HALF_CHUNK_SIZE);
+    stm.write(expected, BLOCK_SIZE+4, BYTES_PER_CHECKSUM-4);
+    stm.write(expected, BLOCK_SIZE+BYTES_PER_CHECKSUM, 
+        FILE_SIZE-3*BYTES_PER_CHECKSUM);
+    stm.close();
+    checkFile(name);
+    cleanupFile(name);
+  }
+  private void checkAndEraseData(byte[] actual, int from, byte[] expected,
+      String message) throws Exception {
+    for (int idx = 0; idx < actual.length; idx++) {
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+                        expected[from+idx]+" actual "+actual[idx],
+                        actual[idx], expected[from+idx]);
+      actual[idx] = 0;
+    }
+  }
+  
+  private void checkFile(Path name) throws Exception {
+    FSDataInputStream stm = fileSys.open(name);
+    // do a sanity check. Read the file
+    stm.readFully(0, actual);
+    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
+    stm.close();
+  }
+
+  private void cleanupFile(Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+  
+  /**
+   * Test write operations for the output stream in DFS.
+   */
+  public void testFSOutputSummer() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setLong("dfs.block.size", BLOCK_SIZE);
+    conf.setInt("io.bytes.per.checksum", BYTES_PER_CHECKSUM);
+    MiniDFSCluster cluster = new MiniDFSCluster(
+        conf, NUM_OF_DATANODES, true, null);
+    fileSys = cluster.getFileSystem();
+    try {
+      Path file = new Path("try.dat");
+      Random rand = new Random(seed);
+      rand.nextBytes(expected);
+      writeFile1(file);
+      writeFile2(file);
+      writeFile3(file);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+}

+ 312 - 0
src/test/org/apache/hadoop/hdfs/TestFileAppend.java

@@ -0,0 +1,312 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.List;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileUtil.HardLink;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+
+/**
+ * This class tests the building blocks that are needed to
+ * support HDFS appends.
+ */
+public class TestFileAppend extends TestCase {
+  static final int blockSize = 1024;
+  static final int numBlocks = 10;
+  static final int fileSize = numBlocks * blockSize + 1;
+  boolean simulatedStorage = false;
+
+  private long seed;
+  private byte[] fileContents = null;
+
+  //
+  // create a buffer that contains the entire test file data.
+  //
+  private void initBuffer(int size) {
+    seed = AppendTestUtil.nextLong();
+    fileContents = AppendTestUtil.randomBytes(seed, size);
+  }
+
+  /*
+   * creates a file but does not close it
+   */ 
+  private FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)blockSize);
+    return stm;
+  }
+
+  //
+  // writes to file but does not close it
+  //
+  private void writeFile(FSDataOutputStream stm) throws IOException {
+    byte[] buffer = AppendTestUtil.randomBytes(seed, fileSize);
+    stm.write(buffer);
+  }
+
+  //
+  // verify that the data written to the full blocks are sane
+  // 
+  private void checkFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    boolean done = false;
+
+    // wait till all full blocks are confirmed by the datanodes.
+    while (!done) {
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {}
+      done = true;
+      BlockLocation[] locations = fileSys.getFileBlockLocations(
+          fileSys.getFileStatus(name), 0, fileSize);
+      if (locations.length < numBlocks) {
+        System.out.println("Number of blocks found " + locations.length);
+        done = false;
+        continue;
+      }
+      for (int idx = 0; idx < numBlocks; idx++) {
+        if (locations[idx].getHosts().length < repl) {
+          System.out.println("Block index " + idx + " not yet replciated.");
+          done = false;
+          break;
+        }
+      }
+    }
+    FSDataInputStream stm = fileSys.open(name);
+    byte[] expected = new byte[numBlocks * blockSize];
+    if (simulatedStorage) {
+      for (int i= 0; i < expected.length; i++) {  
+        expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
+      }
+    } else {
+      for (int i= 0; i < expected.length; i++) {  
+        expected[i] = fileContents[i];
+      }
+    }
+    // do a sanity check. Read the file
+    byte[] actual = new byte[numBlocks * blockSize];
+    stm.readFully(0, actual);
+    checkData(actual, 0, expected, "Read 1");
+  }
+
+  private void checkFullFile(FileSystem fs, Path name) throws IOException {
+    FSDataInputStream stm = fs.open(name);
+    byte[] actual = new byte[fileSize];
+    stm.readFully(0, actual);
+    checkData(actual, 0, fileContents, "Read 2");
+    stm.close();
+  }
+
+  private void checkData(byte[] actual, int from, byte[] expected, String message) {
+    for (int idx = 0; idx < actual.length; idx++) {
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+                   expected[from+idx]+" actual "+actual[idx],
+                   expected[from+idx], actual[idx]);
+      actual[idx] = 0;
+    }
+  }
+
+
+  /**
+   * Test that copy on write for blocks works correctly
+   */
+  public void testCopyOnWrite() throws IOException {
+    Configuration conf = new Configuration();
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    InetSocketAddress addr = new InetSocketAddress("localhost",
+                                                   cluster.getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+    try {
+
+      // create a new file, write to it and close it.
+      //
+      Path file1 = new Path("/filestatus.dat");
+      FSDataOutputStream stm = createFile(fs, file1, 1);
+      writeFile(stm);
+      stm.close();
+
+      // Get a handle to the datanode
+      DataNode[] dn = cluster.listDataNodes();
+      assertTrue("There should be only one datanode but found " + dn.length,
+                  dn.length == 1);
+
+      LocatedBlocks locations = client.namenode.getBlockLocations(
+                                  file1.toString(), 0, Long.MAX_VALUE);
+      List<LocatedBlock> blocks = locations.getLocatedBlocks();
+      FSDataset dataset = (FSDataset) dn[0].data;
+
+      //
+      // Create hard links for a few of the blocks
+      //
+      for (int i = 0; i < blocks.size(); i = i + 2) {
+        Block b = blocks.get(i).getBlock();
+        FSDataset fsd = dataset;
+        File f = fsd.getFile(b);
+        File link = new File(f.toString() + ".link");
+        System.out.println("Creating hardlink for File " + f + 
+                           " to " + link);
+        HardLink.createHardLink(f, link);
+      }
+
+      //
+      // Detach all blocks. This should remove hardlinks (if any)
+      //
+      for (int i = 0; i < blocks.size(); i++) {
+        Block b = blocks.get(i).getBlock();
+        System.out.println("testCopyOnWrite detaching block " + b);
+        assertTrue("Detaching block " + b + " should have returned true",
+                   dataset.detachBlock(b, 1) == true);
+      }
+
+      // Since the blocks were already detached earlier, these calls should
+      // return false
+      //
+      for (int i = 0; i < blocks.size(); i++) {
+        Block b = blocks.get(i).getBlock();
+        System.out.println("testCopyOnWrite detaching block " + b);
+        assertTrue("Detaching block " + b + " should have returned false",
+                   dataset.detachBlock(b, 1) == false);
+      }
+
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test a simple flush on a simple HDFS file.
+   */
+  public void testSimpleFlush() throws IOException {
+    Configuration conf = new Configuration();
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    initBuffer(fileSize);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    try {
+
+      // create a new file.
+      Path file1 = new Path("/simpleFlush.dat");
+      FSDataOutputStream stm = createFile(fs, file1, 1);
+      System.out.println("Created file simpleFlush.dat");
+
+      // write to file
+      int mid = fileSize/2;
+      stm.write(fileContents, 0, mid);
+      stm.sync();
+      System.out.println("Wrote and Flushed first part of file.");
+
+      // write the remainder of the file
+      stm.write(fileContents, mid, fileSize - mid);
+      System.out.println("Written second part of file");
+      stm.sync();
+      stm.sync();
+      System.out.println("Wrote and Flushed second part of file.");
+
+      // verify that full blocks are sane
+      checkFile(fs, file1, 1);
+
+      stm.close();
+      System.out.println("Closed file.");
+
+      // verify that entire file is good
+      checkFullFile(fs, file1);
+
+    } catch (IOException e) {
+      System.out.println("Exception :" + e);
+      throw e; 
+    } catch (Throwable e) {
+      System.out.println("Throwable :" + e);
+      e.printStackTrace();
+      throw new IOException("Throwable : " + e);
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test that file data can be flushed.
+   */
+  public void testComplexFlush() throws IOException {
+    Configuration conf = new Configuration();
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    initBuffer(fileSize);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    try {
+
+      // create a new file.
+      Path file1 = new Path("/complexFlush.dat");
+      FSDataOutputStream stm = createFile(fs, file1, 1);
+      System.out.println("Created file complexFlush.dat");
+
+      int start = 0;
+      for (start = 0; (start + 29) < fileSize; ) {
+        stm.write(fileContents, start, 29);
+        stm.sync();
+        start += 29;
+      }
+      stm.write(fileContents, start, fileSize-start);
+
+      // verify that full blocks are sane
+      checkFile(fs, file1, 1);
+      stm.close();
+
+      // verify that entire file is good
+      checkFullFile(fs, file1);
+    } catch (IOException e) {
+      System.out.println("Exception :" + e);
+      throw e; 
+    } catch (Throwable e) {
+      System.out.println("Throwable :" + e);
+      e.printStackTrace();
+      throw new IOException("Throwable : " + e);
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+}

+ 427 - 0
src/test/org/apache/hadoop/hdfs/TestFileAppend2.java

@@ -0,0 +1,427 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.log4j.Level;
+
+/**
+ * This class tests the building blocks that are needed to
+ * support HDFS appends.
+ */
+public class TestFileAppend2 extends TestCase {
+
+  {
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  static final int blockSize = 1024;
+  static final int numBlocks = 5;
+  static final int fileSize = numBlocks * blockSize + 1;
+  boolean simulatedStorage = false;
+
+  private byte[] fileContents = null;
+
+  int numDatanodes = 5;
+  int numberOfFiles = 50;
+  int numThreads = 10;
+  int numAppendsPerThread = 20;
+/***
+  int numberOfFiles = 1;
+  int numThreads = 1;
+  int numAppendsPerThread = 2000;
+****/
+  Workload[] workload = null;
+  ArrayList<Path> testFiles = new ArrayList<Path>();
+  volatile static boolean globalStatus = true;
+
+  //
+  // create a buffer that contains the entire test file data.
+  //
+  private void initBuffer(int size) {
+    long seed = AppendTestUtil.nextLong();
+    fileContents = AppendTestUtil.randomBytes(seed, size);
+  }
+
+  /*
+   * creates a file but does not close it
+   */ 
+  private FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)blockSize);
+    return stm;
+  }
+
+  private void checkFile(FileSystem fs, Path name, int len) throws IOException {
+    FSDataInputStream stm = fs.open(name);
+    byte[] actual = new byte[len];
+    stm.readFully(0, actual);
+    checkData(actual, 0, fileContents, "Read 2");
+    stm.close();
+  }
+
+  private void checkFullFile(FileSystem fs, Path name) throws IOException {
+    checkFile(fs, name, fileSize);
+  }
+
+  private void checkData(byte[] actual, int from, byte[] expected, String message) {
+    for (int idx = 0; idx < actual.length; idx++) {
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+                   expected[from+idx]+" actual "+actual[idx],
+                   expected[from+idx], actual[idx]);
+      actual[idx] = 0;
+    }
+  }
+
+
+  /**
+   * Creates one file, writes a few bytes to it, and then closes it.
+   * Reopens the same file for appending, writes the remaining data, and then closes it.
+   * Verifies that all data exists in the file.
+   */ 
+  public void testSimpleAppend() throws IOException {
+    Configuration conf = new Configuration();
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    conf.setInt("dfs.datanode.handler.count", 50);
+    conf.setBoolean("dfs.support.append", true);
+    initBuffer(fileSize);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    try {
+      { // test appending to a file.
+
+        // create a new file.
+        Path file1 = new Path("/simpleAppend.dat");
+        FSDataOutputStream stm = createFile(fs, file1, 1);
+        System.out.println("Created file simpleAppend.dat");
+  
+        // write to file
+        int mid = 186;   // io.bytes.per.checksum bytes
+        System.out.println("Writing " + mid + " bytes to file " + file1);
+        stm.write(fileContents, 0, mid);
+        stm.close();
+        System.out.println("Wrote and Closed first part of file.");
+  
+        // write to file
+        int mid2 = 607;   // io.bytes.per.checksum bytes
+        System.out.println("Writing " + mid + " bytes to file " + file1);
+        stm = fs.append(file1);
+        stm.write(fileContents, mid, mid2-mid);
+        stm.close();
+        System.out.println("Wrote and Closed second part of file.");
+  
+        // write the remainder of the file
+        stm = fs.append(file1);
+
+        // ensure getPos is set to reflect existing size of the file
+        assertTrue(stm.getPos() > 0);
+
+        System.out.println("Writing " + (fileSize - mid2) + " bytes to file " + file1);
+        stm.write(fileContents, mid2, fileSize - mid2);
+        System.out.println("Written second part of file");
+        stm.close();
+        System.out.println("Wrote and Closed second part of file.");
+  
+        // verify that entire file is good
+        checkFullFile(fs, file1);
+      }
+
+      { // test appending to a non-existent file.
+        FSDataOutputStream out = null;
+        try {
+          out = fs.append(new Path("/non-existing.dat"));
+          fail("Expected to have FileNotFoundException");
+        }
+        catch(java.io.FileNotFoundException fnfe) {
+          System.out.println("Good: got " + fnfe);
+          fnfe.printStackTrace(System.out);
+        }
+        finally {
+          IOUtils.closeStream(out);
+        }
+      }
+
+      { // test append permission.
+
+        //set root to all writable 
+        Path root = new Path("/");
+        fs.setPermission(root, new FsPermission((short)0777));
+        fs.close();
+
+        // login as a different user
+        final UserGroupInformation superuser = UserGroupInformation.getCurrentUGI();
+        String username = "testappenduser";
+        String group = "testappendgroup";
+        assertFalse(superuser.getUserName().equals(username));
+        assertFalse(Arrays.asList(superuser.getGroupNames()).contains(group));
+        UnixUserGroupInformation appenduser = UnixUserGroupInformation.createImmutable(
+            new String[]{username, group});
+        UnixUserGroupInformation.saveToConf(conf,
+            UnixUserGroupInformation.UGI_PROPERTY_NAME, appenduser);
+        fs = FileSystem.get(conf);
+
+        // create a file
+        Path dir = new Path(root, getClass().getSimpleName());
+        Path foo = new Path(dir, "foo.dat");
+        FSDataOutputStream out = null;
+        int offset = 0;
+        try {
+          out = fs.create(foo);
+          int len = 10 + AppendTestUtil.nextInt(100);
+          out.write(fileContents, offset, len);
+          offset += len;
+        }
+        finally {
+          IOUtils.closeStream(out);
+        }
+
+        // change dir and foo to minimal permissions.
+        fs.setPermission(dir, new FsPermission((short)0100));
+        fs.setPermission(foo, new FsPermission((short)0200));
+
+        // try append, should succeed
+        out = null;
+        try {
+          out = fs.append(foo);
+          int len = 10 + AppendTestUtil.nextInt(100);
+          out.write(fileContents, offset, len);
+          offset += len;
+        }
+        finally {
+          IOUtils.closeStream(out);
+        }
+
+        // change dir and foo to all but no write on foo.
+        fs.setPermission(foo, new FsPermission((short)0577));
+        fs.setPermission(dir, new FsPermission((short)0777));
+
+        // try append, should fail
+        out = null;
+        try {
+          out = fs.append(foo);
+          fail("Expected to have AccessControlException");
+        }
+        catch(AccessControlException ace) {
+          System.out.println("Good: got " + ace);
+          ace.printStackTrace(System.out);
+        }
+        finally {
+          IOUtils.closeStream(out);
+        }
+      }
+    } catch (IOException e) {
+      System.out.println("Exception :" + e);
+      throw e; 
+    } catch (Throwable e) {
+      System.out.println("Throwable :" + e);
+      e.printStackTrace();
+      throw new IOException("Throwable : " + e);
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  //
+  // an object that does a bunch of appends to files
+  //
+  class Workload extends Thread {
+    private int id;
+    private MiniDFSCluster cluster;
+
+    Workload(MiniDFSCluster cluster, int threadIndex) {
+      id = threadIndex;
+      this.cluster = cluster;
+    }
+
+    // create a bunch of files. Write to them and then verify.
+    public void run() {
+      System.out.println("Workload " + id + " starting... ");
+      for (int i = 0; i < numAppendsPerThread; i++) {
+   
+        // pick a file at random and remove it from pool
+        Path testfile = null;
+        synchronized (testFiles) {
+          if (testFiles.size() == 0) {
+            System.out.println("Completed write to almost all files.");
+            return;  
+          }
+          int index = AppendTestUtil.nextInt(testFiles.size());
+          testfile = testFiles.remove(index);
+        }
+
+        long len = 0;
+        int sizeToAppend = 0;
+        try {
+          FileSystem fs = cluster.getFileSystem();
+
+          // add a random number of bytes to file
+          len = fs.getFileStatus(testfile).getLen();
+
+          // if file is already full, then pick another file
+          if (len >= fileSize) {
+            System.out.println("File " + testfile + " is full.");
+            continue;
+          }
+  
+          // do small size appends so that we can trigger multiple
+          // appends to the same file.
+          //
+          int left = (int)(fileSize - len)/3;
+          if (left <= 0) {
+            left = 1;
+          }
+          sizeToAppend = AppendTestUtil.nextInt(left);
+
+          System.out.println("Workload thread " + id +
+                             " appending " + sizeToAppend + " bytes " +
+                             " to file " + testfile +
+                             " of size " + len);
+          FSDataOutputStream stm = fs.append(testfile);
+          stm.write(fileContents, (int)len, sizeToAppend);
+          stm.close();
+
+          // wait for the file size to be reflected in the namenode metadata
+          while (fs.getFileStatus(testfile).getLen() != (len + sizeToAppend)) {
+            try {
+              System.out.println("Workload thread " + id +
+                                 " file " + testfile  +
+                                 " size " + fs.getFileStatus(testfile).getLen() +
+                                 " expected size " + (len + sizeToAppend) +
+                                 " waiting for namenode metadata update.");
+              Thread.sleep(5000);
+            } catch (InterruptedException e) { 
+            }
+          }
+
+          assertTrue("File " + testfile + " size is " + 
+                     fs.getFileStatus(testfile).getLen() +
+                     " but expected " + (len + sizeToAppend),
+                    fs.getFileStatus(testfile).getLen() == (len + sizeToAppend));
+
+          checkFile(fs, testfile, (int)(len + sizeToAppend));
+        } catch (Throwable e) {
+          globalStatus = false;
+          if (e != null && e.toString() != null) {
+            System.out.println("Workload exception " + id + 
+                               " testfile " + testfile +
+                               " " + e);
+            e.printStackTrace();
+          }
+          assertTrue("Workload exception " + id + " testfile " + testfile +
+                     " expected size " + (len + sizeToAppend),
+                     false);
+        }
+
+        // Add testfile back to the pool of files.
+        synchronized (testFiles) {
+          testFiles.add(testfile);
+        }
+      }
+    }
+  }
+
+  /**
+   * Test concurrent appends of random sizes to a pool of files.
+   */
+  public void testComplexAppend() throws IOException {
+    initBuffer(fileSize);
+    Configuration conf = new Configuration();
+    conf.setInt("heartbeat.recheck.interval", 2000);
+    conf.setInt("dfs.heartbeat.interval", 2);
+    conf.setInt("dfs.replication.pending.timeout.sec", 2);
+    conf.setInt("dfs.socket.timeout", 30000);
+    conf.setInt("dfs.datanode.socket.write.timeout", 30000);
+    conf.setInt("dfs.datanode.handler.count", 50);
+    conf.setBoolean("dfs.support.append", true);
+
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
+                                                true, null);
+    cluster.waitActive();
+    FileSystem fs = cluster.getFileSystem();
+
+    try {
+      // create a bunch of test files with random replication factors.
+      // Insert them into a linked list.
+      //
+      for (int i = 0; i < numberOfFiles; i++) {
+        short replication = (short)(AppendTestUtil.nextInt(numDatanodes) + 1);
+        Path testFile = new Path("/" + i + ".dat");
+        FSDataOutputStream stm = createFile(fs, testFile, replication);
+        stm.close();
+        testFiles.add(testFile);
+      }
+
+      // Create threads and make them run workload concurrently.
+      workload = new Workload[numThreads];
+      for (int i = 0; i < numThreads; i++) {
+        workload[i] = new Workload(cluster, i);
+        workload[i].start();
+      }
+
+      // wait for all transactions to get over
+      for (int i = 0; i < numThreads; i++) {
+        try {
+          System.out.println("Waiting for thread " + i + " to complete...");
+          workload[i].join();
+          System.out.println("Thread " + i + " completed.");
+        } catch (InterruptedException e) {
+          i--;      // retry
+        }
+      }
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+
+    // If any of the worker threads failed, mark this test as failed.
+    //
+    assertTrue("testComplexAppend Worker encountered exceptions.", globalStatus);
+  }
+}

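For reference, a minimal sketch of the client-side append pattern these tests exercise, assuming a cluster with dfs.support.append enabled; the class name, path, and sizes are illustrative and not part of this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class AppendSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.support.append", true);      // appends must be enabled
        FileSystem fs = FileSystem.get(conf);

        Path p = new Path("/example/append.dat");         // hypothetical path
        FSDataOutputStream out = fs.create(p, (short)3);  // create with replication 3
        out.write(new byte[1024]);
        out.close();

        out = fs.append(p);                               // reopen the closed file for append
        out.write(new byte[512]);
        out.close();

        // the namenode should now report the combined length of both writes
        System.out.println("length=" + fs.getFileStatus(p).getLen());  // expected 1536
        fs.close();
      }
    }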
+ 270 - 0
src/test/org/apache/hadoop/hdfs/TestFileAppend3.java

@@ -0,0 +1,270 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.io.RandomAccessFile;
+
+import junit.extensions.TestSetup;
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
+
+/** This class implements some of the tests posted in HADOOP-2658. */
+public class TestFileAppend3 extends junit.framework.TestCase {
+  static final long BLOCK_SIZE = 64 * 1024;
+  static final short REPLICATION = 3;
+  static final int DATANODE_NUM = 5;
+
+  private static Configuration conf;
+  private static int buffersize;
+  private static MiniDFSCluster cluster;
+  private static DistributedFileSystem fs;
+
+  public static Test suite() {
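+    // Wrap the suite in a TestSetup so that a single MiniDFSCluster is
+    // started once and shared by all test cases in this class.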
+    return new TestSetup(new TestSuite(TestFileAppend3.class)) {
+      protected void setUp() throws java.lang.Exception {
+        AppendTestUtil.LOG.info("setUp()");
+        conf = new Configuration();
+        conf.setInt("io.bytes.per.checksum", 512);
+        conf.setBoolean("dfs.support.append", true);
+        buffersize = conf.getInt("io.file.buffer.size", 4096);
+        cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+        fs = (DistributedFileSystem)cluster.getFileSystem();
+      }
+    
+      protected void tearDown() throws Exception {
+        AppendTestUtil.LOG.info("tearDown()");
+        if(fs != null) fs.close();
+        if(cluster != null) cluster.shutdown();
+      }
+    };  
+  }
+
+  /** TC1: Append on block boundary. */
+  public void testTC1() throws Exception {
+    final Path p = new Path("/TC1/foo");
+    System.out.println("p=" + p);
+
+    //a. Create file and write one block of data. Close file.
+    final int len1 = (int)BLOCK_SIZE; 
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, len1);
+      out.close();
+    }
+
+    //   Reopen file to append. Append half block of data. Close file.
+    final int len2 = (int)BLOCK_SIZE/2; 
+    {
+      FSDataOutputStream out = fs.append(p);
+      AppendTestUtil.write(out, len1, len2);
+      out.close();
+    }
+    
+    //b. Reopen file and read 1.5 blocks worth of data. Close file.
+    AppendTestUtil.check(fs, p, len1 + len2);
+  }
+
+  /** TC2: Append on non-block boundary. */
+  public void testTC2() throws Exception {
+    final Path p = new Path("/TC2/foo");
+    System.out.println("p=" + p);
+
+    //a. Create file with one and a half block of data. Close file.
+    final int len1 = (int)(BLOCK_SIZE + BLOCK_SIZE/2); 
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, len1);
+      out.close();
+    }
+
+    //   Reopen file to append quarter block of data. Close file.
+    final int len2 = (int)BLOCK_SIZE/4; 
+    {
+      FSDataOutputStream out = fs.append(p);
+      AppendTestUtil.write(out, len1, len2);
+      out.close();
+    }
+
+    //b. Reopen file and read 1.75 blocks of data. Close file.
+    AppendTestUtil.check(fs, p, len1 + len2);
+  }
+
+  /** TC5: Only one simultaneous append. */
+  public void testTC5() throws Exception {
+    final Path p = new Path("/TC5/foo");
+    System.out.println("p=" + p);
+
+    //a. Create file on Machine M1. Write half block to it. Close file.
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, (int)(BLOCK_SIZE/2));
+      out.close();
+    }
+
+    //b. Reopen file in "append" mode on Machine M1.
+    FSDataOutputStream out = fs.append(p);
+
+    //c. On Machine M2, reopen file in "append" mode. This should fail.
+    try {
+      AppendTestUtil.createHdfsWithDifferentUsername(conf).append(p);
+      fail("This should fail.");
+    } catch(IOException ioe) {
+      AppendTestUtil.LOG.info("GOOD: got an exception", ioe);
+    }
+
+    //d. On Machine M1, close file.
+    out.close();        
+  }
+
+  /** TC7: Corrupted replicas are present. */
+  public void testTC7() throws Exception {
+    final short repl = 2;
+    final Path p = new Path("/TC7/foo");
+    System.out.println("p=" + p);
+    
+    //a. Create file with replication factor of 2. Write half block of data. Close file.
+    final int len1 = (int)(BLOCK_SIZE/2); 
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, repl, BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, len1);
+      out.close();
+    }
+    DFSTestUtil.waitReplication(fs, p, repl);
+
+    //b. Log into one datanode that has one replica of this block.
+    //   Find the block file on this datanode and truncate it to zero size.
+    final LocatedBlocks locatedblocks = fs.dfs.namenode.getBlockLocations(p.toString(), 0L, len1);
+    assertEquals(1, locatedblocks.locatedBlockCount());
+    final LocatedBlock lb = locatedblocks.get(0);
+    final Block blk = lb.getBlock();
+    assertEquals(len1, lb.getBlockSize());
+
+    DatanodeInfo[] datanodeinfos = lb.getLocations();
+    assertEquals(repl, datanodeinfos.length);
+    final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
+    final FSDataset data = (FSDataset)dn.getFSDataset();
+    final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(blk), "rw");
+    AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
+    assertEquals(len1, raf.length());
+    raf.setLength(0);
+    raf.close();
+
+    //c. Open file in "append mode".  Append a new block worth of data. Close file.
+    final int len2 = (int)BLOCK_SIZE; 
+    {
+      FSDataOutputStream out = fs.append(p);
+      AppendTestUtil.write(out, len1, len2);
+      out.close();
+    }
+
+    //d. Reopen file and read two blocks worth of data.
+    AppendTestUtil.check(fs, p, len1 + len2);
+  }
+
+  /** TC11: Racing rename */
+  public void testTC11() throws Exception {
+    final Path p = new Path("/TC11/foo");
+    System.out.println("p=" + p);
+
+    //a. Create file and write one block of data. Close file.
+    final int len1 = (int)BLOCK_SIZE; 
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, len1);
+      out.close();
+    }
+
+    //b. Reopen file in "append" mode. Append half block of data.
+    FSDataOutputStream out = fs.append(p);
+    final int len2 = (int)BLOCK_SIZE/2; 
+    AppendTestUtil.write(out, len1, len2);
+    
+    //c. Rename file to file.new.
+    final Path pnew = new Path(p + ".new");
+    assertTrue(fs.rename(p, pnew));
+
+    //d. Close file handle that was opened in (b). 
+    try {
+      out.close();
+      fail("close() should throw an exception");
+    } catch(Exception e) {
+      AppendTestUtil.LOG.info("GOOD!", e);
+    }
+
+    //wait for the lease recovery 
+    cluster.setLeasePeriod(1000, 1000);
+    AppendTestUtil.sleep(5000);
+
+    //check block sizes 
+    final long len = fs.getFileStatus(pnew).getLen();
+    final LocatedBlocks locatedblocks = fs.dfs.namenode.getBlockLocations(pnew.toString(), 0L, len);
+    final int numblock = locatedblocks.locatedBlockCount();
+    for(int i = 0; i < numblock; i++) {
+      final LocatedBlock lb = locatedblocks.get(i);
+      final Block blk = lb.getBlock();
+      final long size = lb.getBlockSize();
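+      // all blocks except the (possibly partial) last one must be full-size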
+      if (i < numblock - 1) {
+        assertEquals(BLOCK_SIZE, size);
+      }
+      for(DatanodeInfo datanodeinfo : lb.getLocations()) {
+        final DataNode dn = cluster.getDataNode(datanodeinfo.getIpcPort());
+        final BlockMetaDataInfo metainfo = dn.getBlockMetaDataInfo(blk);
+        assertEquals(size, metainfo.getNumBytes());
+      }
+    }
+  }
+
+  /** TC12: Append to partial CRC chunk */
+  public void testTC12() throws Exception {
+    final Path p = new Path("/TC12/foo");
+    System.out.println("p=" + p);
+    
+    //a. Create file with a block size of 64KB
+    //   and a default io.bytes.per.checksum of 512 bytes.
+    //   Write 25687 bytes of data. Close file.
+    final int len1 = 25687; 
+    {
+      FSDataOutputStream out = fs.create(p, false, buffersize, REPLICATION, BLOCK_SIZE);
+      AppendTestUtil.write(out, 0, len1);
+      out.close();
+    }
+
+    //b. Reopen file in "append" mode. Append another 5877 bytes of data. Close file.
+    final int len2 = 5877; 
+    {
+      FSDataOutputStream out = fs.append(p);
+      AppendTestUtil.write(out, len1, len2);
+      out.close();
+    }
+
+    //c. Reopen file and read 25687+5877 bytes of data from file. Close file.
+    AppendTestUtil.check(fs, p, len1 + len2);
+  }
+}

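As a side note, a small sketch of the block-enumeration idiom used by testTC7 and testTC11 above; it assumes the code lives in the org.apache.hadoop.hdfs package (so the package-private DFSClient handle is reachable, as in these tests), and the method name is illustrative:

    package org.apache.hadoop.hdfs;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

    class BlockSizeSketch {
      /** Print the size the namenode reports for every block of the file. */
      static void printBlockSizes(DistributedFileSystem fs, Path p) throws Exception {
        long len = fs.getFileStatus(p).getLen();
        LocatedBlocks blocks = fs.dfs.namenode.getBlockLocations(p.toString(), 0L, len);
        for (int i = 0; i < blocks.locatedBlockCount(); i++) {
          LocatedBlock lb = blocks.get(i);
          System.out.println("block " + i + ": " + lb.getBlockSize() + " bytes");
        }
      }
    }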
+ 171 - 0
src/test/org/apache/hadoop/hdfs/TestFileCorruption.java

@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.util.ArrayList;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+
+/**
+ * A JUnit test for corrupted file handling.
+ */
+public class TestFileCorruption extends TestCase {
+  /** check if DFS can handle corrupted blocks properly */
+  public void testFileCorruption() throws Exception {
+    MiniDFSCluster cluster = null;
+    DFSTestUtil util = new DFSTestUtil("TestFileCorruption", 20, 3, 8*1024);
+    try {
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster(conf, 3, true, null);
+      FileSystem fs = cluster.getFileSystem();
+      util.createFiles(fs, "/srcdat");
+      // Now deliberately remove the blocks
+      File data_dir = new File(System.getProperty("test.build.data"),
+                               "dfs/data/data5/current");
+      assertTrue("data directory does not exist", data_dir.exists());
+      File[] blocks = data_dir.listFiles();
+      assertTrue("Blocks do not exist in data-dir", (blocks != null) && (blocks.length > 0));
+      for (int idx = 0; idx < blocks.length; idx++) {
+        if (!blocks[idx].getName().startsWith("blk_")) {
+          continue;
+        }
+        System.out.println("Deliberately removing file "+blocks[idx].getName());
+        assertTrue("Cannot remove file.", blocks[idx].delete());
+      }
+      assertTrue("Corrupted replicas not handled properly.",
+                 util.checkFiles(fs, "/srcdat"));
+      util.cleanup(fs, "/srcdat");
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+
+  /** check if local FS can handle corrupted blocks properly */
+  public void testLocalFileCorruption() throws Exception {
+    Configuration conf = new Configuration();
+    Path file = new Path(System.getProperty("test.build.data"), "corruptFile");
+    FileSystem fs = FileSystem.getLocal(conf);
+    DataOutputStream dos = fs.create(file);
+    dos.writeBytes("original bytes");
+    dos.close();
+    // Now deliberately corrupt the file
+    dos = new DataOutputStream(new FileOutputStream(file.toString()));
+    dos.writeBytes("corruption");
+    dos.close();
+    // Now attempt to read the file
+    DataInputStream dis = fs.open(file, 512);
+    try {
+      System.out.println("A ChecksumException is expected to be logged.");
+      dis.readByte();
+    } catch (ChecksumException ignore) {
+      //expect this exception but let any NPE get thrown
+    }
+    fs.delete(file, true);
+  }
+  
+  /** Test the case where a replica is reported corrupt while it is not
+   * in blocksMap. Make sure that an ArrayIndexOutOfBoundsException is not thrown.
+   * See HADOOP-4351.
+   */
+  public void testArrayOutOfBoundsException() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster.waitActive();
+      
+      FileSystem fs = cluster.getFileSystem();
+      final Path FILE_PATH = new Path("/tmp.txt");
+      final long FILE_LEN = 1L;
+      DFSTestUtil.createFile(fs, FILE_PATH, FILE_LEN, (short)2, 1L);
+      
+      // get the block
+      File dataDir = new File(cluster.getDataDirectory(),
+          "data1/current");
+      Block blk = getBlock(dataDir);
+      if (blk == null) {
+        blk = getBlock(new File(cluster.getDataDirectory(),
+          "dfs/data/data2/current"));
+      }
+      assertFalse(blk==null);
+
+      // start a third datanode
+      cluster.startDataNodes(conf, 1, true, null, null);
+      ArrayList<DataNode> datanodes = cluster.getDataNodes();
+      assertEquals(datanodes.size(), 3);
+      DataNode dataNode = datanodes.get(2);
+      
+      // report corrupted block by the third datanode
+      cluster.getNamesystem().markBlockAsCorrupt(blk, 
+          new DatanodeInfo(dataNode.dnRegistration ));
+      
+      // open the file
+      fs.open(FILE_PATH);
+      
+      //clean up
+      fs.delete(FILE_PATH, false);
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+    
+  }
+  
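+  /**
+   * Scan the given data directory for a block data file (blk_<id>) and build
+   * a Block from it, recovering the generation stamp from the name of the
+   * matching blk_<id>_<genstamp>.meta file.
+   */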
+  private Block getBlock(File dataDir) {
+    assertTrue("data directory does not exist", dataDir.exists());
+    File[] blocks = dataDir.listFiles();
+    assertTrue("Blocks do not exist in dataDir", (blocks != null) && (blocks.length > 0));
+
+    int idx = 0;
+    String blockFileName = null;
+    for (; idx < blocks.length; idx++) {
+      blockFileName = blocks[idx].getName();
+      if (blockFileName.startsWith("blk_") && !blockFileName.endsWith(".meta")) {
+        break;
+      }
+    }
+    if (blockFileName == null) {
+      return null;
+    }
+    long blockId = Long.parseLong(blockFileName.substring("blk_".length()));
+    long blockTimeStamp = GenerationStamp.WILDCARD_STAMP;
+    for (idx=0; idx < blocks.length; idx++) {
+      String fileName = blocks[idx].getName();
+      if (fileName.startsWith(blockFileName) && fileName.endsWith(".meta")) {
+        int startIndex = blockFileName.length()+1;
+        int endIndex = fileName.length() - ".meta".length();
+        blockTimeStamp = Long.parseLong(fileName.substring(startIndex, endIndex));
+        break;
+      }
+    }
+    return new Block(blockId, blocks[idx].length(), blockTimeStamp);
+  }
+}

+ 750 - 0
src/test/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -0,0 +1,750 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.log4j.Level;
+
+
+/**
+ * This class tests that a file need not be closed before its
+ * data can be read by another client.
+ */
+public class TestFileCreation extends junit.framework.TestCase {
+  static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/";
+
+  {
+    //((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)DFSClient.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 8192;
+  static final int numBlocks = 2;
+  static final int fileSize = numBlocks * blockSize + 1;
+  boolean simulatedStorage = false;
+
+  // The test file is 2 times the blocksize plus one. This means that when the
+  // entire file is written, the first two blocks definitely get flushed to
+  // the datanodes.
+
+  // creates a file but does not close it
+  static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    System.out.println("createFile: Created " + name + " with " + repl + " replica.");
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)blockSize);
+    return stm;
+  }
+
+  //
+  // writes to file but does not close it
+  //
+  static void writeFile(FSDataOutputStream stm) throws IOException {
+    writeFile(stm, fileSize);
+  }
+
+  //
+  // writes specified bytes to file.
+  //
+  static void writeFile(FSDataOutputStream stm, int size) throws IOException {
+    byte[] buffer = AppendTestUtil.randomBytes(seed, size);
+    stm.write(buffer, 0, size);
+  }
+
+  //
+  // verify that the data written to the full blocks is sane
+  // 
+  private void checkFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    boolean done = false;
+
+    // wait till all full blocks are confirmed by the datanodes.
+    while (!done) {
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {}
+      done = true;
+      BlockLocation[] locations = fileSys.getFileBlockLocations(
+          fileSys.getFileStatus(name), 0, fileSize);
+      if (locations.length < numBlocks) {
+        done = false;
+        continue;
+      }
+      for (int idx = 0; idx < locations.length; idx++) {
+        if (locations[idx].getHosts().length < repl) {
+          done = false;
+          break;
+        }
+      }
+    }
+    FSDataInputStream stm = fileSys.open(name);
+    final byte[] expected;
+    if (simulatedStorage) {
+      expected = new byte[numBlocks * blockSize];
+      for (int i= 0; i < expected.length; i++) {  
+        expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
+      }
+    } else {
+      expected = AppendTestUtil.randomBytes(seed, numBlocks*blockSize);
+    }
+    // do a sanity check. Read the file
+    byte[] actual = new byte[numBlocks * blockSize];
+    stm.readFully(0, actual);
+    stm.close();
+    checkData(actual, 0, expected, "Read 1");
+  }
+
+  static private void checkData(byte[] actual, int from, byte[] expected, String message) {
+    for (int idx = 0; idx < actual.length; idx++) {
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+                   expected[from+idx]+" actual "+actual[idx],
+                   expected[from+idx], actual[idx]);
+      actual[idx] = 0;
+    }
+  }
+
+  static void checkFullFile(FileSystem fs, Path name) throws IOException {
+    FileStatus stat = fs.getFileStatus(name);
+    BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, 
+                                                         fileSize);
+    for (int idx = 0; idx < locations.length; idx++) {
+      String[] hosts = locations[idx].getNames();
+      for (int i = 0; i < hosts.length; i++) {
+        System.out.print( hosts[i] + " ");
+      }
+      System.out.println(" off " + locations[idx].getOffset() +
+                         " len " + locations[idx].getLength());
+    }
+
+    byte[] expected = AppendTestUtil.randomBytes(seed, fileSize);
+    FSDataInputStream stm = fs.open(name);
+    byte[] actual = new byte[fileSize];
+    stm.readFully(0, actual);
+    checkData(actual, 0, expected, "Read 2");
+    stm.close();
+  }
+
+  /**
+   * Test that file data becomes available before file is closed.
+   */
+  public void testFileCreation() throws IOException {
+    Configuration conf = new Configuration();
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    try {
+
+      //
+      // check that / exists
+      //
+      Path path = new Path("/");
+      System.out.println("Path : \"" + path.toString() + "\"");
+      System.out.println(fs.getFileStatus(path).isDir()); 
+      assertTrue("/ should be a directory", 
+                 fs.getFileStatus(path).isDir() == true);
+
+      //
+      // Create a directory inside /, then try to overwrite it
+      //
+      Path dir1 = new Path("/test_dir");
+      fs.mkdirs(dir1);
+      System.out.println("createFile: Creating " + dir1.getName() + 
+        " for overwrite of existing directory.");
+      try {
+        fs.create(dir1, true); // Create path, overwrite=true
+        fs.close();
+        assertTrue("Did not prevent directory from being overwritten.", false);
+      } catch (IOException ie) {
+        if (!ie.getMessage().contains("already exists as a directory."))
+          throw ie;
+      }
+      
+      // create a new file in home directory. Do not close it.
+      //
+      Path file1 = new Path("filestatus.dat");
+      FSDataOutputStream stm = createFile(fs, file1, 1);
+
+      // verify that file exists in FS namespace
+      assertTrue(file1 + " should be a file", 
+                  fs.getFileStatus(file1).isDir() == false);
+      System.out.println("Path : \"" + file1 + "\"");
+
+      // write to file
+      writeFile(stm);
+
+      // Make sure a client can read it before it is closed.
+      checkFile(fs, file1, 1);
+
+      // verify that file size has changed
+      long len = fs.getFileStatus(file1).getLen();
+      assertTrue(file1 + " should be of size " + (numBlocks * blockSize) +
+                 " but found to be of size " + len, 
+                  len == numBlocks * blockSize);
+
+      stm.close();
+
+      // verify that file size has changed to the full size
+      len = fs.getFileStatus(file1).getLen();
+      assertTrue(file1 + " should be of size " + fileSize +
+                 " but found to be of size " + len, 
+                  len == fileSize);
+      
+      
+      // Check storage usage 
+      // can't check capacities for real storage since the OS file system may be changing under us.
+      if (simulatedStorage) {
+        DataNode dn = cluster.getDataNodes().get(0);
+        assertEquals(fileSize, dn.getFSDataset().getDfsUsed());
+        assertEquals(SimulatedFSDataset.DEFAULT_CAPACITY-fileSize, dn.getFSDataset().getRemaining());
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test deleteOnExit
+   */
+  public void testDeleteOnExit() throws IOException {
+    Configuration conf = new Configuration();
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    FileSystem localfs = FileSystem.getLocal(conf);
+
+    try {
+
+      // Creates files in HDFS and local file system.
+      //
+      Path file1 = new Path("filestatus.dat");
+      Path file2 = new Path("filestatus2.dat");
+      Path file3 = new Path("filestatus3.dat");
+      FSDataOutputStream stm1 = createFile(fs, file1, 1);
+      FSDataOutputStream stm2 = createFile(fs, file2, 1);
+      FSDataOutputStream stm3 = createFile(localfs, file3, 1);
+      System.out.println("DeleteOnExit: Created files.");
+
+      // write to files and close. Purposely, do not close file2.
+      writeFile(stm1);
+      writeFile(stm3);
+      stm1.close();
+      stm2.close();
+      stm3.close();
+
+      // set delete on exit flag on files.
+      fs.deleteOnExit(file1);
+      fs.deleteOnExit(file2);
+      localfs.deleteOnExit(file3);
+
+      // close the file system. This should make the above files
+      // disappear.
+      fs.close();
+      localfs.close();
+      fs = null;
+      localfs = null;
+
+      // reopen file system and verify that file does not exist.
+      fs = cluster.getFileSystem();
+      localfs = FileSystem.getLocal(conf);
+
+      assertTrue(file1 + " still exists in spite of deleteOnExit being set.",
+                 !fs.exists(file1));
+      assertTrue(file2 + " still exists in spite of deleteOnExit being set.",
+                 !fs.exists(file2));
+      assertTrue(file3 + " still exists in spite of deleteOnExit being set.",
+                 !localfs.exists(file3));
+      System.out.println("DeleteOnExit successful.");
+
+    } finally {
+      IOUtils.closeStream(fs);
+      IOUtils.closeStream(localfs);
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test that file data does not become corrupted even in the face of errors.
+   */
+  public void testFileCreationError1() throws IOException {
+    Configuration conf = new Configuration();
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    cluster.waitActive();
+    InetSocketAddress addr = new InetSocketAddress("localhost",
+                                                   cluster.getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+
+    try {
+
+      // create a new file.
+      //
+      Path file1 = new Path("/filestatus.dat");
+      FSDataOutputStream stm = createFile(fs, file1, 1);
+
+      // verify that file exists in FS namespace
+      assertTrue(file1 + " should be a file", 
+                  fs.getFileStatus(file1).isDir() == false);
+      System.out.println("Path : \"" + file1 + "\"");
+
+      // kill the datanode
+      cluster.shutdownDataNodes();
+
+      // wait for the datanode to be declared dead
+      while (true) {
+        DatanodeInfo[] info = client.datanodeReport(
+            FSConstants.DatanodeReportType.LIVE);
+        if (info.length == 0) {
+          break;
+        }
+        System.out.println("testFileCreationError1: waiting for datanode " +
+                           " to die.");
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {
+        }
+      }
+
+      // write 1 byte to file. 
+      // This should fail because all datanodes are dead.
+      byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
+      try {
+        stm.write(buffer);
+        stm.close();
+      } catch (Exception e) {
+        System.out.println("Encountered expected exception");
+      }
+
+      // verify that no blocks are associated with this file
+      // bad block allocations were cleaned up earlier.
+      LocatedBlocks locations = client.namenode.getBlockLocations(
+                                  file1.toString(), 0, Long.MAX_VALUE);
+      System.out.println("locations = " + locations.locatedBlockCount());
+      assertTrue("Error blocks were not cleaned up",
+                 locations.locatedBlockCount() == 0);
+    } finally {
+      cluster.shutdown();
+      client.close();
+    }
+  }
+
+  /**
+   * Test that the filesystem removes the last block from a file if its
+   * lease expires.
+   */
+  public void testFileCreationError2() throws IOException {
+    long leasePeriod = 1000;
+    System.out.println("testFileCreationError2 start");
+    Configuration conf = new Configuration();
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    DistributedFileSystem dfs = null;
+    try {
+      cluster.waitActive();
+      dfs = (DistributedFileSystem)cluster.getFileSystem();
+      DFSClient client = dfs.dfs;
+
+      // create a new file.
+      //
+      Path file1 = new Path("/filestatus.dat");
+      createFile(dfs, file1, 1);
+      System.out.println("testFileCreationError2: "
+                         + "Created file filestatus.dat with one replica.");
+
+      LocatedBlocks locations = client.namenode.getBlockLocations(
+                                  file1.toString(), 0, Long.MAX_VALUE);
+      System.out.println("testFileCreationError2: "
+          + "The file has " + locations.locatedBlockCount() + " blocks.");
+
+      // add another block to the file
+      LocatedBlock location = client.namenode.addBlock(file1.toString(), 
+          client.clientName);
+      System.out.println("testFileCreationError2: "
+          + "Added block " + location.getBlock());
+
+      locations = client.namenode.getBlockLocations(file1.toString(), 
+                                                    0, Long.MAX_VALUE);
+      int count = locations.locatedBlockCount();
+      System.out.println("testFileCreationError2: "
+          + "The file now has " + count + " blocks.");
+      
+      // set the soft and hard limit to be 1 second so that the
+      // namenode triggers lease recovery
+      cluster.setLeasePeriod(leasePeriod, leasePeriod);
+
+      // wait for the lease to expire
+      try {
+        Thread.sleep(5 * leasePeriod);
+      } catch (InterruptedException e) {
+      }
+
+      // verify that the last block was synchronized.
+      locations = client.namenode.getBlockLocations(file1.toString(), 
+                                                    0, Long.MAX_VALUE);
+      System.out.println("testFileCreationError2: "
+          + "locations = " + locations.locatedBlockCount());
+      assertEquals(0, locations.locatedBlockCount());
+      System.out.println("testFileCreationError2 successful");
+    } finally {
+      IOUtils.closeStream(dfs);
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test that file leases are persisted across namenode restarts.
+   * This test is currently not triggered because more HDFS work is
+   * needed to handle persistent leases.
+   */
+  public void xxxtestFileCreationNamenodeRestart() throws IOException {
+    Configuration conf = new Configuration();
+    final int MAX_IDLE_TIME = 2000; // 2s
+    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = null;
+    try {
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      final int nnport = cluster.getNameNodePort();
+
+      // create a new file.
+      Path file1 = new Path("/filestatus.dat");
+      FSDataOutputStream stm = createFile(fs, file1, 1);
+      System.out.println("testFileCreationNamenodeRestart: "
+                         + "Created file " + file1);
+
+      // write two full blocks.
+      writeFile(stm, numBlocks * blockSize);
+      stm.sync();
+
+      // rename the file while keeping it open.
+      Path fileRenamed = new Path("/filestatusRenamed.dat");
+      fs.rename(file1, fileRenamed);
+      System.out.println("testFileCreationNamenodeRestart: "
+                         + "Renamed file " + file1 + " to " +
+                         fileRenamed);
+      file1 = fileRenamed;
+
+      // create another new file.
+      //
+      Path file2 = new Path("/filestatus2.dat");
+      FSDataOutputStream stm2 = createFile(fs, file2, 1);
+      System.out.println("testFileCreationNamenodeRestart: "
+                         + "Created file " + file2);
+
+      // create yet another new file with full path name. 
+      // rename it while open
+      //
+      Path file3 = new Path("/user/home/fullpath.dat");
+      FSDataOutputStream stm3 = createFile(fs, file3, 1);
+      System.out.println("testFileCreationNamenodeRestart: "
+                         + "Created file " + file3);
+      Path file4 = new Path("/user/home/fullpath4.dat");
+      FSDataOutputStream stm4 = createFile(fs, file4, 1);
+      System.out.println("testFileCreationNamenodeRestart: "
+                         + "Created file " + file4);
+
+      fs.mkdirs(new Path("/bin"));
+      fs.rename(new Path("/user/home"), new Path("/bin"));
+      Path file3new = new Path("/bin/home/fullpath.dat");
+      System.out.println("testFileCreationNamenodeRestart: "
+                         + "Renamed file " + file3 + " to " +
+                         file3new);
+      Path file4new = new Path("/bin/home/fullpath4.dat");
+      System.out.println("testFileCreationNamenodeRestart: "
+                         + "Renamed file " + file4 + " to " +
+                         file4new);
+
+      // restart cluster with the same namenode port as before.
+      // This ensures that leases are persisted in fsimage.
+      cluster.shutdown();
+      try {
+        Thread.sleep(2*MAX_IDLE_TIME);
+      } catch (InterruptedException e) {
+      }
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+
+      // restart cluster yet again. This triggers the code to read in
+      // persistent leases from fsimage.
+      cluster.shutdown();
+      try {
+        Thread.sleep(5000);
+      } catch (InterruptedException e) {
+      }
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+
+      // instruct the dfsclient to use a new filename when it requests
+      // new blocks for files that were renamed.
+      DFSClient.DFSOutputStream dfstream = (DFSClient.DFSOutputStream)
+                                                 (stm.getWrappedStream());
+      dfstream.setTestFilename(file1.toString());
+      dfstream = (DFSClient.DFSOutputStream) (stm3.getWrappedStream());
+      dfstream.setTestFilename(file3new.toString());
+      dfstream = (DFSClient.DFSOutputStream) (stm4.getWrappedStream());
+      dfstream.setTestFilename(file4new.toString());
+
+      // write 1 byte to file.  This should succeed because the 
+      // namenode should have persisted leases.
+      byte[] buffer = AppendTestUtil.randomBytes(seed, 1);
+      stm.write(buffer);
+      stm.close();
+      stm2.write(buffer);
+      stm2.close();
+      stm3.close();
+      stm4.close();
+
+      // verify that new block is associated with this file
+      DFSClient client = ((DistributedFileSystem)fs).dfs;
+      LocatedBlocks locations = client.namenode.getBlockLocations(
+                                  file1.toString(), 0, Long.MAX_VALUE);
+      System.out.println("locations = " + locations.locatedBlockCount());
+      assertTrue("Error blocks were not cleaned up for file " + file1,
+                 locations.locatedBlockCount() == 3);
+
+      // verify filestatus2.dat
+      locations = client.namenode.getBlockLocations(
+                                  file2.toString(), 0, Long.MAX_VALUE);
+      System.out.println("locations = " + locations.locatedBlockCount());
+      assertTrue("Error blocks were not cleaned up for file " + file2,
+                 locations.locatedBlockCount() == 1);
+    } finally {
+      IOUtils.closeStream(fs);
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test that all open files are closed when client dies abnormally.
+   */
+  public void testDFSClientDeath() throws IOException {
+    Configuration conf = new Configuration();
+    System.out.println("Testing abnormal client death.");
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    DFSClient dfsclient = dfs.dfs;
+    try {
+
+      // create a new file in home directory. Do not close it.
+      //
+      Path file1 = new Path("/clienttest.dat");
+      FSDataOutputStream stm = createFile(fs, file1, 1);
+      System.out.println("Created file clienttest.dat");
+
+      // write to file
+      writeFile(stm);
+
+      // close the dfsclient before closing the output stream.
+      // This should close all existing file.
+      dfsclient.close();
+
+      // reopen file system and verify that file exists.
+      assertTrue(file1 + " does not exist.", 
+          AppendTestUtil.createHdfsWithDifferentUsername(conf).exists(file1));
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test that file data becomes available before file is closed.
+   */
+  public void testFileCreationSimulated() throws IOException {
+    simulatedStorage = true;
+    testFileCreation();
+    simulatedStorage = false;
+  }
+
+  /**
+   * Test creating two files at the same time. 
+   */
+  public void testConcurrentFileCreation() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      
+      Path[] p = {new Path("/foo"), new Path("/bar")};
+      
+      //write 2 files at the same time
+      FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
+      int i = 0;
+      for(; i < 100; i++) {
+        out[0].write(i);
+        out[1].write(i);
+      }
+      out[0].close();
+      for(; i < 200; i++) {out[1].write(i);}
+      out[1].close();
+
+      //verify
+      FSDataInputStream[] in = {fs.open(p[0]), fs.open(p[1])};  
+      for(i = 0; i < 100; i++) {assertEquals(i, in[0].read());}
+      for(i = 0; i < 200; i++) {assertEquals(i, in[1].read());}
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+
+  /**
+   * Create a file, write something, fsync but not close.
+   * Then change lease period and wait for lease recovery.
+   * Finally, read the block directly from each Datanode and verify the content.
+   */
+  public void testLeaseExpireHardLimit() throws Exception {
+    System.out.println("testLeaseExpireHardLimit start");
+    final long leasePeriod = 1000;
+    final int DATANODE_NUM = 3;
+
+    Configuration conf = new Configuration();
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+    DistributedFileSystem dfs = null;
+    try {
+      cluster.waitActive();
+      dfs = (DistributedFileSystem)cluster.getFileSystem();
+
+      // create a new file.
+      final String f = DIR + "foo";
+      final Path fpath = new Path(f);
+      FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
+      out.write("something".getBytes());
+      out.sync();
+
+      // set the soft and hard limit to be 1 second so that the
+      // namenode triggers lease recovery
+      cluster.setLeasePeriod(leasePeriod, leasePeriod);
+      // wait for the lease to expire
+      try {Thread.sleep(5 * leasePeriod);} catch (InterruptedException e) {}
+
+      LocatedBlocks locations = dfs.dfs.namenode.getBlockLocations(
+          f, 0, Long.MAX_VALUE);
+      assertEquals(1, locations.locatedBlockCount());
+      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
+      int successcount = 0;
+      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
+        DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
+        FSDataset dataset = (FSDataset)datanode.data;
+        Block b = dataset.getStoredBlock(locatedblock.getBlock().getBlockId());
+        File blockfile = dataset.findBlockFile(b.getBlockId());
+        System.out.println("blockfile=" + blockfile);
+        if (blockfile != null) {
+          BufferedReader in = new BufferedReader(new FileReader(blockfile));
+          assertEquals("something", in.readLine());
+          in.close();
+          successcount++;
+        }
+      }
+      System.out.println("successcount=" + successcount);
+      assertTrue(successcount > 0); 
+    } finally {
+      IOUtils.closeStream(dfs);
+      cluster.shutdown();
+    }
+
+    System.out.println("testLeaseExpireHardLimit successful");
+  }
+
+  // test closing file system before all file handles are closed.
+  public void testFsClose() throws Exception {
+    System.out.println("test file system close start");
+    final int DATANODE_NUM = 3;
+
+    Configuration conf = new Configuration();
+
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+    DistributedFileSystem dfs = null;
+    try {
+      cluster.waitActive();
+      dfs = (DistributedFileSystem)cluster.getFileSystem();
+
+      // create a new file.
+      final String f = DIR + "foofs";
+      final Path fpath = new Path(f);
+      FSDataOutputStream out = TestFileCreation.createFile(dfs, fpath, DATANODE_NUM);
+      out.write("something".getBytes());
+
+      // close file system without closing file
+      dfs.close();
+    } finally {
+      System.out.println("testFsClose successful");
+    }
+  }
+}

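For reference, a minimal sketch of the deleteOnExit contract that testDeleteOnExit above verifies: paths registered with deleteOnExit are removed when the FileSystem instance is closed. The class and path names are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class DeleteOnExitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path p = new Path("/tmp/scratch.dat");          // hypothetical path
        fs.create(p).close();                           // create an empty file
        fs.deleteOnExit(p);                             // schedule removal when fs is closed
        fs.close();                                     // the file is deleted here

        FileSystem fs2 = FileSystem.get(conf);          // fresh handle after the close
        System.out.println("exists=" + fs2.exists(p));  // expected: false
        fs2.close();
      }
    }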
+ 145 - 0
src/test/org/apache/hadoop/hdfs/TestFileCreationClient.java

@@ -0,0 +1,145 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.log4j.Level;
+
+/**
+ * This class tests that a file need not be closed before its
+ * data can be read by another client.
+ */
+public class TestFileCreationClient extends junit.framework.TestCase {
+  static final String DIR = "/" + TestFileCreationClient.class.getSimpleName() + "/";
+
+  {
+    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)InterDatanodeProtocol.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  /** Test lease recovery triggered by DFSClient. */
+  public void testClientTriggeredLeaseRecovery() throws Exception {
+    final int REPLICATION = 3;
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.datanode.handler.count", 1);
+    conf.setInt("dfs.replication", REPLICATION);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION, true, null);
+
+    try {
+      final FileSystem fs = cluster.getFileSystem();
+      final Path dir = new Path("/wrwelkj");
+      
+      SlowWriter[] slowwriters = new SlowWriter[10];
+      for(int i = 0; i < slowwriters.length; i++) {
+        slowwriters[i] = new SlowWriter(fs, new Path(dir, "file" + i));
+      }
+
+      try {
+        for(int i = 0; i < slowwriters.length; i++) {
+          slowwriters[i].start();
+        }
+
+        Thread.sleep(1000);                       // let writers get started
+
+        //stop a datanode; the writers should then trigger lease recovery.
+        cluster.stopDataNode(AppendTestUtil.nextInt(REPLICATION));
+        
+        //let the slow writers write for a few more seconds
+        System.out.println("Wait a few seconds");
+        Thread.sleep(5000);
+      }
+      finally {
+        for(int i = 0; i < slowwriters.length; i++) {
+          if (slowwriters[i] != null) {
+            slowwriters[i].running = false;
+            slowwriters[i].interrupt();
+          }
+        }
+        for(int i = 0; i < slowwriters.length; i++) {
+          if (slowwriters[i] != null) {
+            slowwriters[i].join();
+          }
+        }
+      }
+
+      //Verify the file
+      System.out.println("Verify the file");
+      for(int i = 0; i < slowwriters.length; i++) {
+        System.out.println(slowwriters[i].filepath + ": length="
+            + fs.getFileStatus(slowwriters[i].filepath).getLen());
+        FSDataInputStream in = null;
+        try {
+          in = fs.open(slowwriters[i].filepath);
+          for(int j = 0, x; (x = in.read()) != -1; j++) {
+            assertEquals(j, x);
+          }
+        }
+        finally {
+          IOUtils.closeStream(in);
+        }
+      }
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+
+  static class SlowWriter extends Thread {
+    final FileSystem fs;
+    final Path filepath;
+    boolean running = true;
+    
+    SlowWriter(FileSystem fs, Path filepath) {
+      super(SlowWriter.class.getSimpleName() + ":" + filepath);
+      this.fs = fs;
+      this.filepath = filepath;
+    }
+
+    public void run() {
+      FSDataOutputStream out = null;
+      int i = 0;
+      try {
+        out = fs.create(filepath);
+        for(; running; i++) {
+          System.out.println(getName() + " writes " + i);
+          out.write(i);
+          out.sync();
+          sleep(100);
+        }
+      }
+      catch(Exception e) {
+        System.out.println(getName() + " dies: e=" + e);
+      }
+      finally {
+        System.out.println(getName() + ": i=" + i);
+        IOUtils.closeStream(out);
+      }
+    }        
+  }
+}

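Several of the tests in this patch shorten the lease limits to force recovery of files that were left open. A condensed sketch of that idiom, assuming the code sits in the org.apache.hadoop.hdfs package (MiniDFSCluster.setLeasePeriod is a test-only hook) and using an illustrative path:

    package org.apache.hadoop.hdfs;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class LeaseRecoverySketch {
      static void demo() throws Exception {
        MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
        try {
          cluster.waitActive();
          FileSystem fs = cluster.getFileSystem();
          FSDataOutputStream out = fs.create(new Path("/lease.dat"));  // hypothetical path
          out.write("something".getBytes());
          out.sync();                          // data reaches the datanodes, file stays open

          cluster.setLeasePeriod(1000, 1000);  // shrink soft and hard lease limits to 1s
          Thread.sleep(5000);                  // give the namenode time to recover the lease
          // after recovery the namenode closes the file on behalf of the dead writer
        } finally {
          cluster.shutdown();
        }
      }
    }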
+ 99 - 0
src/test/org/apache/hadoop/hdfs/TestFileCreationDelete.java

@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.log4j.Level;
+
+public class TestFileCreationDelete extends junit.framework.TestCase {
+  {
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  public void testFileCreationDeleteParent() throws IOException {
+    Configuration conf = new Configuration();
+    final int MAX_IDLE_TIME = 2000; // 2s
+    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    conf.setBoolean("dfs.support.append", true);
+
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = null;
+    try {
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      final int nnport = cluster.getNameNodePort();
+
+      // create file1.
+      Path dir = new Path("/foo");
+      Path file1 = new Path(dir, "file1");
+      FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
+      System.out.println("testFileCreationDeleteParent: "
+          + "Created file " + file1);
+      TestFileCreation.writeFile(stm1, 1000);
+      stm1.sync();
+
+      // create file2.
+      Path file2 = new Path("/file2");
+      FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
+      System.out.println("testFileCreationDeleteParent: "
+          + "Created file " + file2);
+      TestFileCreation.writeFile(stm2, 1000);
+      stm2.sync();
+
+      // rm dir
+      fs.delete(dir, true);
+
+      // restart cluster with the same namenode port as before.
+      // This ensures that leases are persisted in fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+
+      // restart cluster yet again. This triggers the code to read in
+      // persistent leases from fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(5000);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+
+      assertTrue(!fs.exists(file1));
+      assertTrue(fs.exists(file2));
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+}

+ 80 - 0
src/test/org/apache/hadoop/hdfs/TestFileCreationEmpty.java

@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.ConcurrentModificationException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+
+/**
+ * Test empty file creation.
+ */
+public class TestFileCreationEmpty extends junit.framework.TestCase {
+  private boolean isConcurrentModificationException = false;
+
+  /**
+   * This test creates three empty files and lets their leases expire.
+   * This triggers release of the leases. 
+   * Releasing the leases should close the empty files 
+   * without causing a ConcurrentModificationException.
+   */
+  public void testLeaseExpireEmptyFiles() throws Exception {
+    final Thread.UncaughtExceptionHandler oldUEH = Thread.getDefaultUncaughtExceptionHandler();
+    Thread.setDefaultUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
+      public void uncaughtException(Thread t, Throwable e) {
+        if (e instanceof ConcurrentModificationException) {
+          FSNamesystem.LOG.error("t=" + t, e);
+          isConcurrentModificationException = true;
+        }
+      }
+    });
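+    // Lease release runs in a NameNode background thread, so a ConcurrentModificationException
+    // thrown there would not fail the test thread directly; the handler above records it
+    // for the assertion at the end of the test.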
+
+    System.out.println("testLeaseExpireEmptyFiles start");
+    final long leasePeriod = 1000;
+    final int DATANODE_NUM = 3;
+
+    final Configuration conf = new Configuration();
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+    try {
+      cluster.waitActive();
+      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+
+      // create a new file.
+      TestFileCreation.createFile(dfs, new Path("/foo"), DATANODE_NUM);
+      TestFileCreation.createFile(dfs, new Path("/foo2"), DATANODE_NUM);
+      TestFileCreation.createFile(dfs, new Path("/foo3"), DATANODE_NUM);
+
+      // set the soft and hard limit to be 1 second so that the
+      // namenode triggers lease recovery
+      cluster.setLeasePeriod(leasePeriod, leasePeriod);
+      // wait for the lease to expire
+      try {Thread.sleep(5 * leasePeriod);} catch (InterruptedException e) {}
+
+      assertFalse(isConcurrentModificationException);
+    } finally {
+      Thread.setDefaultUncaughtExceptionHandler(oldUEH);
+      cluster.shutdown();
+    }
+  }
+}

+ 24 - 0
src/test/org/apache/hadoop/hdfs/TestFileCreationNamenodeRestart.java

@@ -0,0 +1,24 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+public class TestFileCreationNamenodeRestart extends junit.framework.TestCase {
+  public void testFileCreationNamenodeRestart() throws Exception {
+    new TestFileCreation().xxxtestFileCreationNamenodeRestart();
+  }
+}

+ 143 - 0
src/test/org/apache/hadoop/hdfs/TestFileStatus.java

@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FSDataOutputStream;
+
+/**
+ * This class tests the FileStatus API.
+ */
+public class TestFileStatus extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 8192;
+  static final int fileSize = 16384;
+
+  private static String TEST_ROOT_DIR =
+    new Path(System.getProperty("test.build.data","/tmp"))
+    .toString().replace(' ', '+');
+  
+  private void writeFile(FileSystem fileSys, Path name, int repl,
+                         int fileSize, int blockSize)
+    throws IOException {
+    // create and write a file that contains three blocks of data
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)blockSize);
+    byte[] buffer = new byte[fileSize];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+
+  private void checkFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
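+    // waitReplication polls the NameNode until every block of the file reports the requested replication.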
+    DFSTestUtil.waitReplication(fileSys, name, (short) repl);
+  }
+
+
+  /**
+   * Tests the FileStatus API.
+   */
+  public void testFileStatus() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    DFSClient dfsClient = new DFSClient(conf);
+    try {
+
+      //
+      // check that / exists
+      //
+      Path path = new Path("/");
+      System.out.println("Path : \"" + path.toString() + "\"");
+      System.out.println(fs.isDirectory(path));
+      System.out.println(fs.getFileStatus(path).isDir()); 
+      assertTrue("/ should be a directory", 
+                 fs.getFileStatus(path).isDir() == true);
+      
+      // make sure getFileInfo returns null for files which do not exist
+      FileStatus fileInfo = dfsClient.getFileInfo("/noSuchFile");
+      assertTrue(fileInfo == null);
+
+      // create a file in home directory
+      //
+      Path file1 = new Path("filestatus.dat");
+      writeFile(fs, file1, 1, fileSize, blockSize);
+      System.out.println("Created file filestatus.dat with one "
+                         + " replicas.");
+      checkFile(fs, file1, 1);
+      assertTrue(file1 + " should be a file", 
+                  fs.getFileStatus(file1).isDir() == false);
+      assertTrue(fs.getFileStatus(file1).getBlockSize() == blockSize);
+      assertTrue(fs.getFileStatus(file1).getReplication() == 1);
+      assertTrue(fs.getFileStatus(file1).getLen() == fileSize);
+      System.out.println("Path : \"" + file1 + "\"");
+
+      // create an empty directory
+      //
+      Path parentDir = new Path("/test");
+      Path dir = new Path("/test/mkdirs");
+      assertTrue(fs.mkdirs(dir));
+      assertTrue(fs.exists(dir));
+      assertTrue(dir + " should be a directory", 
+                 fs.getFileStatus(dir).isDir());
+      assertTrue(dir + " should be zero size ",
+                 fs.getContentSummary(dir).getLength() == 0);
+      assertTrue(dir + " should be zero size ",
+                 fs.getFileStatus(dir).getLen() == 0);
+      System.out.println("Dir : \"" + dir + "\"");
+
+      // create another file that is smaller than a block.
+      //
+      Path file2 = new Path("/test/mkdirs/filestatus2.dat");
+      writeFile(fs, file2, 1, blockSize/4, blockSize);
+      System.out.println("Created file filestatus2.dat with one "
+                         + " replicas.");
+      checkFile(fs, file2, 1);
+      System.out.println("Path : \"" + file2 + "\"");
+
+      // verify file attributes
+      assertTrue(fs.getFileStatus(file2).getBlockSize() == blockSize);
+      assertTrue(fs.getFileStatus(file2).getReplication() == 1);
+
+      // create another file in the same directory
+      Path file3 = new Path("/test/mkdirs/filestatus3.dat");
+      writeFile(fs, file3, 1, blockSize/4, blockSize);
+      System.out.println("Created file filestatus3.dat with one "
+                         + " replicas.");
+      checkFile(fs, file3, 1);
+
+      // verify that the size of the directory increased by the size 
+      // of the two files
+      assertTrue(dir + " size should be " + (blockSize/2), 
+                 blockSize/2 == fs.getContentSummary(dir).getLength());
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+}

+ 176 - 0
src/test/org/apache/hadoop/hdfs/TestGetBlocks.java

@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+
+import junit.framework.TestCase;
+/**
+ * This class tests the NamenodeProtocol.getBlocks() API and generation stamp wildcard matching.
+ */
+public class TestGetBlocks extends TestCase {
+  /** test getBlocks */
+  public void testGetBlocks() throws Exception {
+    final Configuration CONF = new Configuration();
+
+    final short REPLICATION_FACTOR = (short)2;
+    final int DEFAULT_BLOCK_SIZE = 1024;
+    final Random r = new Random();
+    
+    CONF.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    MiniDFSCluster cluster = new MiniDFSCluster(
+          CONF, REPLICATION_FACTOR, true, null );
+    try {
+      cluster.waitActive();
+      
+      // create a file with two blocks
+      FileSystem fs = cluster.getFileSystem();
+      FSDataOutputStream out = fs.create(new Path("/tmp.txt"),
+          REPLICATION_FACTOR);
+      byte [] data = new byte[1024];
+      long fileLen = 2*DEFAULT_BLOCK_SIZE;
+      long bytesToWrite = fileLen;
+      while( bytesToWrite > 0 ) {
+        r.nextBytes(data);
+        int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
+        out.write(data, 0, bytesToWriteNext);
+        bytesToWrite -= bytesToWriteNext;
+      }
+      out.close();
+
+      // get blocks & data nodes
+      List<LocatedBlock> locatedBlocks;
+      DatanodeInfo[] dataNodes=null;
+      boolean notWritten;
+      do {
+        DFSClient dfsclient = new DFSClient(CONF);
+        locatedBlocks = dfsclient.namenode.
+          getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
+        assertEquals(2, locatedBlocks.size());
+        notWritten = false;
+        for(int i=0; i<2; i++) {
+          dataNodes = locatedBlocks.get(i).getLocations();
+          if(dataNodes.length != REPLICATION_FACTOR) {
+            notWritten = true;
+            try {
+              Thread.sleep(10);
+            } catch(InterruptedException e) {
+            }
+            break;
+          }
+        }
+      } while(notWritten);
+      
+      // get RPC client to namenode
+      InetSocketAddress addr = new InetSocketAddress("localhost",
+          cluster.getNameNodePort());
+      NamenodeProtocol namenode = (NamenodeProtocol) RPC.getProxy(
+          NamenodeProtocol.class, NamenodeProtocol.versionID, addr,
+          UnixUserGroupInformation.login(CONF), CONF,
+          NetUtils.getDefaultSocketFactory(CONF));
+
+      // get blocks of size fileLen from dataNodes[0]
+      BlockWithLocations[] locs;
+      locs = namenode.getBlocks(dataNodes[0], fileLen).getBlocks();
+      assertEquals(locs.length, 2);
+      assertEquals(locs[0].getDatanodes().length, 2);
+      assertEquals(locs[1].getDatanodes().length, 2);
+
+      // get blocks of size BlockSize from dataNodes[0]
+      locs = namenode.getBlocks(dataNodes[0], DEFAULT_BLOCK_SIZE).getBlocks();
+      assertEquals(locs.length, 1);
+      assertEquals(locs[0].getDatanodes().length, 2);
+
+      // get blocks of size 1 from dataNodes[0]
+      locs = namenode.getBlocks(dataNodes[0], 1).getBlocks();
+      assertEquals(locs.length, 1);
+      assertEquals(locs[0].getDatanodes().length, 2);
+
+      // get blocks of size 0 from dataNodes[0]
+      getBlocksWithException(namenode, dataNodes[0], 0);     
+
+      // get blocks of size -1 from dataNodes[0]
+      getBlocksWithException(namenode, dataNodes[0], -1);
+
+      // get blocks of size BlockSize from a non-existent datanode
+      getBlocksWithException(namenode, new DatanodeInfo(), 2);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  private void getBlocksWithException(NamenodeProtocol namenode,
+                                      DatanodeInfo datanode,
+                                      long size) throws IOException {
+    boolean getException = false;
+    try {
+        namenode.getBlocks(datanode, size);
+    } catch(RemoteException e) {
+      getException = true;
+      assertTrue(e.getMessage().contains("IllegalArgumentException"));
+    }
+    assertTrue(getException);
+  }
+ 
+  public void testGenerationStampWildCard() {
+    Map<Block, Long> map = new HashMap<Block, Long>();
+    final Random RAN = new Random();
+    final long seed = RAN.nextLong();
+    System.out.println("seed=" +  seed);
+    RAN.setSeed(seed);
+
+    long[] blkids = new long[10]; 
+    for(int i = 0; i < blkids.length; i++) {
+      blkids[i] = 1000L + RAN.nextInt(100000);
+      map.put(new Block(blkids[i], 0, blkids[i]), blkids[i]);
+    }
+    System.out.println("map=" + map.toString().replace(",", "\n  "));
+    
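+    // Look the blocks up again using the wildcard generation stamp;
+    // Block equality is expected to treat the wildcard as matching any stamp.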
+    for(int i = 0; i < blkids.length; i++) {
+      Block b = new Block(blkids[i], 0, GenerationStamp.WILDCARD_STAMP);
+      Long v = map.get(b);
+      System.out.println(b + " => " + v);
+      assertEquals(blkids[i], v.longValue());
+    }
+  }
+
+  /**
+   * @param args
+   */
+  public static void main(String[] args) throws Exception {
+    (new TestGetBlocks()).testGetBlocks();
+  }
+
+}

+ 50 - 0
src/test/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java

@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+
+public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
+  
+  private MiniDFSCluster cluster;
+  private String defaultWorkingDirectory;
+
+  @Override
+  protected void setUp() throws Exception {
+    Configuration conf = new Configuration();
+    cluster = new MiniDFSCluster(conf, 2, true, null);
+    fs = cluster.getFileSystem();
+    defaultWorkingDirectory = "/user/" + 
+           UnixUserGroupInformation.login().getUserName();
+  }
+  
+  @Override
+  protected void tearDown() throws Exception {
+    super.tearDown();
+    cluster.shutdown();
+  }
+
+  @Override
+  protected String getDefaultWorkingDirectory() {
+    return defaultWorkingDirectory;
+  }
+  
+}

+ 243 - 0
src/test/org/apache/hadoop/hdfs/TestHDFSServerPorts.java

@@ -0,0 +1,243 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
+
+/**
+ * This test checks correctness of port usage by HDFS components:
+ * NameNode, DataNode, and SecondaryNameNode.
+ * 
+ * The correct behavior is:<br> 
+ * - when a specific port is provided the server must either start on that port 
+ * or fail by throwing {@link java.net.BindException}.<br>
+ * - if the port = 0 (ephemeral) then the server should choose 
+ * a free port and start on it.
+ */
+public class TestHDFSServerPorts extends TestCase {
+  public static final String NAME_NODE_HOST = "localhost:";
+  public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
+
+  Configuration config;
+  File hdfsDir;
+
+  /**
+   * Start the name-node.
+   */
+  public NameNode startNameNode() throws IOException {
+    String dataDir = System.getProperty("test.build.data");
+    hdfsDir = new File(dataDir, "dfs");
+    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+      throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
+    }
+    config = new Configuration();
+    config.set("dfs.name.dir", new File(hdfsDir, "name1").getPath());
+    FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
+    config.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
+    NameNode.format(config);
+
+    String[] args = new String[] {};
+    // NameNode will modify config with the ports it bound to
+    return NameNode.createNameNode(args, config);
+  }
+
+  /**
+   * Start the data-node.
+   */
+  public DataNode startDataNode(int index, Configuration config) 
+  throws IOException {
+    String dataDir = System.getProperty("test.build.data");
+    File dataNodeDir = new File(dataDir, "data-" + index);
+    config.set("dfs.data.dir", dataNodeDir.getPath());
+
+    String[] args = new String[] {};
+    // NameNode will modify config with the ports it bound to
+    return DataNode.createDataNode(args, config);
+  }
+
+  /**
+   * Stop the datanode.
+   */
+  public void stopDataNode(DataNode dn) {
+    if (dn != null) {
+      dn.shutdown();
+    }
+  }
+
+  public void stopNameNode(NameNode nn) {
+    if (nn != null) {
+      nn.stop();
+    }
+  }
+
+  public Configuration getConfig() {
+    return this.config;
+  }
+
+  /**
+   * Check whether the name-node can be started.
+   */
+  private boolean canStartNameNode(Configuration conf) throws IOException {
+    NameNode nn2 = null;
+    try {
+      nn2 = NameNode.createNameNode(new String[]{}, conf);
+    } catch(IOException e) {
+      if (e instanceof java.net.BindException)
+        return false;
+      throw e;
+    }
+    stopNameNode(nn2);
+    return true;
+  }
+
+  /**
+   * Check whether the data-node can be started.
+   */
+  private boolean canStartDataNode(Configuration conf) throws IOException {
+    DataNode dn = null;
+    try {
+      dn = DataNode.createDataNode(new String[]{}, conf);
+    } catch(IOException e) {
+      if (e instanceof java.net.BindException)
+        return false;
+      throw e;
+    }
+    dn.shutdown();
+    return true;
+  }
+
+  /**
+   * Check whether the secondary name-node can be started.
+   */
+  private boolean canStartSecondaryNode(Configuration conf) throws IOException {
+    SecondaryNameNode sn = null;
+    try {
+      sn = new SecondaryNameNode(conf);
+    } catch(IOException e) {
+      if (e instanceof java.net.BindException)
+        return false;
+      throw e;
+    }
+    sn.shutdown();
+    return true;
+  }
+
+  /**
+   * Verify name-node port usage.
+   */
+  public void testNameNodePorts() throws Exception {
+    NameNode nn = null;
+    try {
+      nn = startNameNode();
+
+      // start another namenode on the same port
+      Configuration conf2 = new Configuration(config);
+      conf2.set("dfs.name.dir", new File(hdfsDir, "name2").getPath());
+      NameNode.format(conf2);
+      boolean started = canStartNameNode(conf2);
+      assertFalse(started); // should fail
+
+      // start on a different main port; should still fail because the http port conflicts
+      FileSystem.setDefaultUri(conf2, "hdfs://"+NAME_NODE_HOST + "0");
+      started = canStartNameNode(conf2);
+      assertFalse(started); // should fail again
+
+      // reset conf2 since NameNode modifies it
+      FileSystem.setDefaultUri(conf2, "hdfs://"+NAME_NODE_HOST + "0");
+      // different http port
+      conf2.set("dfs.http.address", NAME_NODE_HTTP_HOST + "0");
+      started = canStartNameNode(conf2);
+      assertTrue(started); // should start now
+    } finally {
+      stopNameNode(nn);
+    }
+  }
+
+  /**
+   * Verify data-node port usage.
+   */
+  public void testDataNodePorts() throws Exception {
+    NameNode nn = null;
+    try {
+      nn = startNameNode();
+
+      // start data-node on the same port as name-node
+      Configuration conf2 = new Configuration(config);
+      conf2.set("dfs.data.dir", new File(hdfsDir, "data").getPath());
+      conf2.set("dfs.datanode.address",
+                FileSystem.getDefaultUri(config).getAuthority());
+      conf2.set("dfs.datanode.http.address", NAME_NODE_HTTP_HOST + "0");
+      boolean started = canStartDataNode(conf2);
+      assertFalse(started); // should fail
+
+      // bind http server to the same port as name-node
+      conf2.set("dfs.datanode.address", NAME_NODE_HOST + "0");
+      conf2.set("dfs.datanode.http.address", 
+                config.get("dfs.http.address"));
+      started = canStartDataNode(conf2);
+      assertFalse(started); // should fail
+    
+      // both ports are different from the name-node ones
+      conf2.set("dfs.datanode.address", NAME_NODE_HOST + "0");
+      conf2.set("dfs.datanode.http.address", NAME_NODE_HTTP_HOST + "0");
+      conf2.set("dfs.datanode.ipc.address", NAME_NODE_HOST + "0");
+      started = canStartDataNode(conf2);
+      assertTrue(started); // should start now
+    } finally {
+      stopNameNode(nn);
+    }
+  }
+
+  /**
+   * Verify secondary name-node port usage.
+   */
+  public void testSecondaryNodePorts() throws Exception {
+    NameNode nn = null;
+    try {
+      nn = startNameNode();
+
+      // bind http server to the same port as name-node
+      Configuration conf2 = new Configuration(config);
+      conf2.set("dfs.secondary.http.address", 
+                config.get("dfs.http.address"));
+      SecondaryNameNode.LOG.info("= Starting 1 on: " + 
+                                 conf2.get("dfs.secondary.http.address"));
+      boolean started = canStartSecondaryNode(conf2);
+      assertFalse(started); // should fail
+
+      // bind http server to a different port
+      conf2.set("dfs.secondary.http.address", NAME_NODE_HTTP_HOST + "0");
+      SecondaryNameNode.LOG.info("= Starting 2 on: " + 
+                                 conf2.get("dfs.secondary.http.address"));
+      started = canStartSecondaryNode(conf2);
+      assertTrue(started); // should start now
+    } finally {
+      stopNameNode(nn);
+    }
+  }
+}

+ 64 - 0
src/test/org/apache/hadoop/hdfs/TestHDFSTrash.java

@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import junit.extensions.TestSetup;
+import junit.framework.Test;
+import junit.framework.TestSuite;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.TestTrash;
+
+/**
+ * This class tests commands from Trash.
+ */
+public class TestHDFSTrash extends TestTrash {
+
+  private static MiniDFSCluster cluster = null;
+  public static Test suite() {
+    TestSetup setup = new TestSetup(new TestSuite(TestHDFSTrash.class)) {
+      protected void setUp() throws Exception {
+        Configuration conf = new Configuration();
+        cluster = new MiniDFSCluster(conf, 2, true, null);
+      }
+      protected void tearDown() throws Exception {
+        if (cluster != null) { cluster.shutdown(); }
+      }
+    };
+    return setup;
+  }
+
+  /**
+   * Tests Trash on HDFS
+   */
+  public void testTrash() throws IOException {
+    trashShell(cluster.getFileSystem(), new Path("/"));
+  }
+
+  public void testNonDefaultFS() throws IOException {
+    FileSystem fs = cluster.getFileSystem();
+    Configuration conf = fs.getConf();
+    conf.set("fs.default.name", fs.getUri().toString());
+    trashNonDefaultFS(conf);
+  }
+
+}

+ 199 - 0
src/test/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java

@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.HashSet;
+import java.util.Set;
+import java.net.*;
+
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+
+
+/**
+ * This class tests the replication and injection of blocks of a DFS file for simulated storage.
+ */
+public class TestInjectionForSimulatedStorage extends TestCase {
+  private int checksumSize = 16;
+  private int blockSize = checksumSize*2;
+  private int numBlocks = 4;
+  private int filesize = blockSize*numBlocks;
+  private int numDataNodes = 4;
+  private static final Log LOG = LogFactory.getLog(
+      "org.apache.hadoop.hdfs.TestInjectionForSimulatedStorage");
+
+  
+  private void writeFile(FileSystem fileSys, Path name, int repl)
+                                                throws IOException {
+    // create and write a file that contains three blocks of data
+    FSDataOutputStream stm = fileSys.create(name, true,
+          fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                      (short)repl, (long)blockSize);
+    byte[] buffer = new byte[filesize];
+    for (int i=0; i<buffer.length; i++) {
+      buffer[i] = '1';
+    }
+    stm.write(buffer);
+    stm.close();
+  }
+  
+  // Waits for all of the blocks to have expected replication
+  private void waitForBlockReplication(String filename, 
+                                       ClientProtocol namenode,
+                                       int expected, long maxWaitSec) 
+                                       throws IOException {
+    long start = System.currentTimeMillis();
+    
+    //wait for all the blocks to be replicated;
+    LOG.info("Checking for block replication for " + filename);
+    
+    LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
+    assertEquals(numBlocks, blocks.locatedBlockCount());
+    
+    for (int i = 0; i < numBlocks; ++i) {
+      LOG.info("Checking for block:" + (i+1));
+      while (true) { // Loop to check for block i (usually once block 0 is done, all are done)
+        blocks = namenode.getBlockLocations(filename, 0, Long.MAX_VALUE);
+        assertEquals(numBlocks, blocks.locatedBlockCount());
+        LocatedBlock block = blocks.get(i);
+        int actual = block.getLocations().length;
+        if ( actual == expected ) {
+          LOG.info("Got enough replicas for " + (i+1) + "th block " + block.getBlock() +
+              ", got " + actual + ".");
+          break;
+        }
+        LOG.info("Not enough replicas for " + (i+1) + "th block " + block.getBlock() +
+                               " yet. Expecting " + expected + ", got " + 
+                               actual + ".");
+      
+        if (maxWaitSec > 0 && 
+            (System.currentTimeMillis() - start) > (maxWaitSec * 1000)) {
+          throw new IOException("Timedout while waiting for all blocks to " +
+                                " be replicated for " + filename);
+        }
+      
+        try {
+          Thread.sleep(500);
+        } catch (InterruptedException ignored) {}
+      }
+    }
+  }
+ 
+  
+  
+  /* This test makes sure that the NameNode re-replicates under-replicated
+   * blocks from whatever valid replicas are available. It uses simulated
+   * storage and its block-injection feature.
+   * 
+   * It creates a file with several blocks and replication of 4. 
+   * The cluster is then shut down - the NN retains its state but the DNs are 
+   * all simulated and hence lose their blocks. 
+   * The blocks are then injected into one of the DNs. The expected behaviour is
+   * that the NN will arrange for the missing replicas to be copied from that valid source.
+   */
+  public void testInjection() throws IOException {
+    
+    MiniDFSCluster cluster = null;
+
+    String testFile = "/replication-test-file";
+    Path testPath = new Path(testFile);
+    
+    byte buffer[] = new byte[1024];
+    for (int i=0; i<buffer.length; i++) {
+      buffer[i] = '1';
+    }
+    
+    try {
+      Configuration conf = new Configuration();
+      conf.set("dfs.replication", Integer.toString(numDataNodes));
+      conf.setInt("io.bytes.per.checksum", checksumSize);
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      //first time format
+      cluster = new MiniDFSCluster(0, conf, numDataNodes, true,
+                                   true, null, null);
+      cluster.waitActive();
+      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                                            cluster.getNameNodePort()),
+                                            conf);
+      
+      writeFile(cluster.getFileSystem(), testPath, numDataNodes);
+
+      
+      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, 20);
+
+      
+      Block[][] blocksList = cluster.getAllBlockReports();
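+      // blocksList now holds every datanode's block report; these blocks are
+      // re-injected into a fresh cluster below.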
+                    
+      
+      cluster.shutdown();
+      cluster = null;
+      
+
+      
+      /* Start the MiniDFSCluster with more datanodes since once a writeBlock
+       * to a datanode node fails, same block can not be written to it
+       * immediately. In our case some replication attempts will fail.
+       */
+      
+      LOG.info("Restarting minicluster");
+      conf = new Configuration();
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+      conf.set("dfs.safemode.threshold.pct", "0.0f"); 
+      
+      cluster = new MiniDFSCluster(0, conf, numDataNodes*2, false,
+                                   true, null, null);
+      cluster.waitActive();
+      Set<Block> uniqueBlocks = new HashSet<Block>();
+      for (int i=0; i<blocksList.length; ++i) {
+        for (int j=0; j < blocksList[i].length; ++j) {
+          uniqueBlocks.add(blocksList[i][j]);
+        }
+      }
+      // Insert all the blocks in the first data node
+      
+      LOG.info("Inserting " + uniqueBlocks.size() + " blocks");
+      Block[] blocks = uniqueBlocks.toArray(new Block[uniqueBlocks.size()]);
+      cluster.injectBlocks(0, blocks);
+      
+      dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                                  cluster.getNameNodePort()),
+                                  conf);
+      
+      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+      
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }  
+}

+ 66 - 0
src/test/org/apache/hadoop/hdfs/TestLease.java

@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+public class TestLease extends junit.framework.TestCase {
+  static boolean hasLease(MiniDFSCluster cluster, Path src) {
+    return cluster.getNamesystem().leaseManager.getLeaseByPath(src.toString()) != null;
+  }
+  
+  final Path dir = new Path("/test/lease/");
+
+  public void testLease() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      assertTrue(fs.mkdirs(dir));
+      
+      Path a = new Path(dir, "a");
+      Path b = new Path(dir, "b");
+
+      DataOutputStream a_out = fs.create(a);
+      a_out.writeBytes("something");
+
+      assertTrue(hasLease(cluster, a));
+      assertTrue(!hasLease(cluster, b));
+      
+      DataOutputStream b_out = fs.create(b);
+      b_out.writeBytes("something");
+
+      assertTrue(hasLease(cluster, a));
+      assertTrue(hasLease(cluster, b));
+
+      a_out.close();
+      b_out.close();
+
+      assertTrue(!hasLease(cluster, a));
+      assertTrue(!hasLease(cluster, b));
+      
+      fs.delete(dir, true);
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+}

+ 140 - 0
src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -0,0 +1,140 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.Arrays;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.TestInterDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+
+public class TestLeaseRecovery extends junit.framework.TestCase {
+  static final int BLOCK_SIZE = 1024;
+  static final short REPLICATION_NUM = (short)3;
+
+  static void checkMetaInfo(Block b, InterDatanodeProtocol idp
+      ) throws IOException {
+    TestInterDatanodeProtocol.checkMetaInfo(b, idp, null);
+  }
+  
+  static int min(Integer... x) {
+    int m = x[0];
+    for(int i = 1; i < x.length; i++) {
+      if (x[i] < m) {
+        m = x[i];
+      }
+    }
+    return m;
+  }
+
+  /**
+   * The following test first creates a file with a few blocks.
+   * It randomly truncates the replica of the last block stored in each datanode.
+   * Finally, it triggers block synchronization to synchronize all stored replicas of the last block.
+   */
+  public void testBlockSynchronization() throws Exception {
+    final int ORG_FILE_SIZE = 3000; 
+    Configuration conf = new Configuration();
+    conf.setLong("dfs.block.size", BLOCK_SIZE);
+    conf.setBoolean("dfs.support.append", true);
+    MiniDFSCluster cluster = null;
+
+    try {
+      cluster = new MiniDFSCluster(conf, 5, true, null);
+      cluster.waitActive();
+
+      //create a file
+      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+      String filestr = "/foo";
+      Path filepath = new Path(filestr);
+      DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
+      assertTrue(dfs.dfs.exists(filestr));
+      DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
+
+      //get block info for the last block
+      LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
+          dfs.dfs.namenode, filestr);
+      DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
+      assertEquals(REPLICATION_NUM, datanodeinfos.length);
+
+      //connect to data nodes
+      InterDatanodeProtocol[] idps = new InterDatanodeProtocol[REPLICATION_NUM];
+      DataNode[] datanodes = new DataNode[REPLICATION_NUM];
+      for(int i = 0; i < REPLICATION_NUM; i++) {
+        idps[i] = DataNode.createInterDataNodeProtocolProxy(datanodeinfos[i], conf);
+        datanodes[i] = cluster.getDataNode(datanodeinfos[i].getIpcPort());
+        assertTrue(datanodes[i] != null);
+      }
+      
+      //verify BlockMetaDataInfo
+      Block lastblock = locatedblock.getBlock();
+      DataNode.LOG.info("newblocks=" + lastblock);
+      for(int i = 0; i < REPLICATION_NUM; i++) {
+        checkMetaInfo(lastblock, idps[i]);
+      }
+
+      //setup random block sizes 
+      int lastblocksize = ORG_FILE_SIZE % BLOCK_SIZE;
+      Integer[] newblocksizes = new Integer[REPLICATION_NUM];
+      for(int i = 0; i < REPLICATION_NUM; i++) {
+        newblocksizes[i] = AppendTestUtil.nextInt(lastblocksize);
+      }
+      DataNode.LOG.info("newblocksizes = " + Arrays.asList(newblocksizes)); 
+
+      //update blocks with random block sizes
+      Block[] newblocks = new Block[REPLICATION_NUM];
+      for(int i = 0; i < REPLICATION_NUM; i++) {
+        newblocks[i] = new Block(lastblock.getBlockId(), newblocksizes[i],
+            lastblock.getGenerationStamp());
+        idps[i].updateBlock(lastblock, newblocks[i], false);
+        checkMetaInfo(newblocks[i], idps[i]);
+      }
+
+      DataNode.LOG.info("dfs.dfs.clientName=" + dfs.dfs.clientName);
+      cluster.getNameNode().append(filestr, dfs.dfs.clientName);
+
+      //block synchronization
+      final int primarydatanodeindex = AppendTestUtil.nextInt(datanodes.length);
+      DataNode.LOG.info("primarydatanodeindex  =" + primarydatanodeindex);
+      DataNode primary = datanodes[primarydatanodeindex];
+      DataNode.LOG.info("primary.dnRegistration=" + primary.dnRegistration);
+      primary.recoverBlocks(new Block[]{lastblock}, new DatanodeInfo[][]{datanodeinfos}).join();
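+      // After recovery completes, every replica of the last block should report the minimum
+      // of the truncated sizes and carry the new generation stamp, as asserted below.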
+
+      BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
+      int minsize = min(newblocksizes);
+      long currentGS = cluster.getNamesystem().getGenerationStamp();
+      lastblock.setGenerationStamp(currentGS);
+      for(int i = 0; i < REPLICATION_NUM; i++) {
+        updatedmetainfo[i] = idps[i].getBlockMetaDataInfo(lastblock);
+        assertEquals(lastblock.getBlockId(), updatedmetainfo[i].getBlockId());
+        assertEquals(minsize, updatedmetainfo[i].getNumBytes());
+        assertEquals(currentGS, updatedmetainfo[i].getGenerationStamp());
+      }
+    }
+    finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+}

+ 151 - 0
src/test/org/apache/hadoop/hdfs/TestLeaseRecovery2.java

@@ -0,0 +1,151 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.log4j.Level;
+
+public class TestLeaseRecovery2 extends junit.framework.TestCase {
+  {
+    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  static final long BLOCK_SIZE = 1024;
+  static final int FILE_SIZE = 1024*16;
+  static final short REPLICATION_NUM = (short)3;
+  static byte[] buffer = new byte[FILE_SIZE];
+
+  public void testBlockSynchronization() throws Exception {
+    final long softLease = 1000;
+    final long hardLease = 60 * 60 *1000;
+    final short repl = 3;
+    final Configuration conf = new Configuration();
+    final int bufferSize = conf.getInt("io.file.buffer.size", 4096);
+    conf.setLong("dfs.block.size", BLOCK_SIZE);
+    conf.setInt("dfs.heartbeat.interval", 1);
+  //  conf.setInt("io.bytes.per.checksum", 16);
+
+    MiniDFSCluster cluster = null;
+    byte[] actual = new byte[FILE_SIZE];
+
+    try {
+      cluster = new MiniDFSCluster(conf, 5, true, null);
+      cluster.waitActive();
+
+      //create a file
+      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+      // create a random file name
+      String filestr = "/foo" + AppendTestUtil.nextInt();
+      System.out.println("filestr=" + filestr);
+      Path filepath = new Path(filestr);
+      FSDataOutputStream stm = dfs.create(filepath, true,
+          bufferSize, repl, BLOCK_SIZE);
+      assertTrue(dfs.dfs.exists(filestr));
+
+      // write random number of bytes into it.
+      int size = AppendTestUtil.nextInt(FILE_SIZE);
+      System.out.println("size=" + size);
+      stm.write(buffer, 0, size);
+
+      // sync file
+      AppendTestUtil.LOG.info("sync");
+      stm.sync();
+      AppendTestUtil.LOG.info("leasechecker.interruptAndJoin()");
+      dfs.dfs.leasechecker.interruptAndJoin();
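+      // Stopping the client's lease renewer lets the (shortened) soft lease expire on the
+      // NameNode, so the re-open attempt below can trigger lease recovery.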
+
+      // set the soft limit to be 1 second so that the
+      // namenode triggers lease recovery on next attempt to write-for-open.
+      cluster.setLeasePeriod(softLease, hardLease);
+
+      // try to re-open the file before closing the previous handle. This
+      // should fail but will trigger lease recovery.
+      {
+        Configuration conf2 = new Configuration(conf);
+        String username = UserGroupInformation.getCurrentUGI().getUserName()+"_1";
+        UnixUserGroupInformation.saveToConf(conf2,
+            UnixUserGroupInformation.UGI_PROPERTY_NAME,
+            new UnixUserGroupInformation(username, new String[]{"supergroup"}));
+        FileSystem dfs2 = FileSystem.get(conf2);
+  
+        boolean done = false;
+        for(int i = 0; i < 10 && !done; i++) {
+          AppendTestUtil.LOG.info("i=" + i);
+          try {
+            dfs2.create(filepath, false, bufferSize, repl, BLOCK_SIZE);
+            fail("Creation of an existing file should never succeed.");
+          } catch (IOException ioe) {
+            final String message = ioe.getMessage();
+            if (message.contains("file exists")) {
+              AppendTestUtil.LOG.info("done", ioe);
+              done = true;
+            }
+            else if (message.contains(AlreadyBeingCreatedException.class.getSimpleName())) {
+              AppendTestUtil.LOG.info("GOOD! got " + message);
+            }
+            else {
+              AppendTestUtil.LOG.warn("UNEXPECTED IOException", ioe);
+            }
+          }
+
+          if (!done) {
+            AppendTestUtil.LOG.info("sleep " + 5000 + "ms");
+            try {Thread.sleep(5000);} catch (InterruptedException e) {}
+          }
+        }
+        assertTrue(done);
+      }
+
+      AppendTestUtil.LOG.info("Lease for file " +  filepath + " is recovered. "
+          + "Validating its contents now...");
+
+      // verify that file-size matches
+      assertTrue("File should be " + size + " bytes, but is actually " +
+                 " found to be " + dfs.getFileStatus(filepath).getLen() +
+                 " bytes",
+                 dfs.getFileStatus(filepath).getLen() == size);
+
+      // verify that there is enough data to read.
+      System.out.println("File size is good. Now validating sizes from datanodes...");
+      FSDataInputStream stmin = dfs.open(filepath);
+      stmin.readFully(0, actual, 0, size);
+      stmin.close();
+    }
+    finally {
+      try {
+        if (cluster != null) {cluster.shutdown();}
+      } catch (Exception e) {
+        // ignore
+      }
+    }
+  }
+}

+ 95 - 0
src/test/org/apache/hadoop/hdfs/TestLocalDFS.java

@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * This class tests the DFS class via the FileSystem interface in a single node
+ * mini-cluster.
+ */
+public class TestLocalDFS extends TestCase {
+
+  private void writeFile(FileSystem fileSys, Path name) throws IOException {
+    DataOutputStream stm = fileSys.create(name);
+    stm.writeBytes("oom");
+    stm.close();
+  }
+  
+  private void readFile(FileSystem fileSys, Path name) throws IOException {
+    DataInputStream stm = fileSys.open(name);
+    byte[] buffer = new byte[4];
+    int bytesRead = stm.read(buffer, 0 , 4);
+    assertEquals("oom", new String(buffer, 0 , bytesRead));
+    stm.close();
+  }
+  
+  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+
+  static String getUserName(FileSystem fs) {
+    if (fs instanceof DistributedFileSystem) {
+      return ((DistributedFileSystem)fs).dfs.ugi.getUserName();
+    }
+    return System.getProperty("user.name");
+  }
+
+  /**
+   * Tests get/set working directory in DFS.
+   */
+  public void testWorkingDirectory() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fileSys = cluster.getFileSystem();
+    try {
+      Path orig_path = fileSys.getWorkingDirectory();
+      assertTrue(orig_path.isAbsolute());
+      Path file1 = new Path("somewhat/random.txt");
+      writeFile(fileSys, file1);
+      assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
+      fileSys.delete(file1, true);
+      Path subdir1 = new Path("/somewhere");
+      fileSys.setWorkingDirectory(subdir1);
+      writeFile(fileSys, file1);
+      cleanupFile(fileSys, new Path(subdir1, file1.toString()));
+      Path subdir2 = new Path("else");
+      fileSys.setWorkingDirectory(subdir2);
+      writeFile(fileSys, file1);
+      readFile(fileSys, file1);
+      cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
+                                    file1.toString()));
+
+      // test home directory
+      Path home = new Path("/user/" + getUserName(fileSys))
+        .makeQualified(fileSys);
+      Path fsHome = fileSys.getHomeDirectory();
+      assertEquals(home, fsHome);
+
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+}
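
For context, a minimal sketch (not part of this patch) of the working-directory behaviour the new TestLocalDFS exercises: relative Paths resolve against FileSystem.getWorkingDirectory(), and getHomeDirectory() defaults to /user/<user.name>. The class name and paths below are hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical example, not part of this commit.
    public class WorkingDirExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);            // whatever fs.default.name points at
        fs.setWorkingDirectory(new Path("/somewhere"));  // later relative Paths resolve under it
        Path relative = new Path("data/part-0");         // no leading slash => relative
        Path resolved = new Path(fs.getWorkingDirectory(), relative.toString());
        System.out.println(resolved);                    // /somewhere/data/part-0
        System.out.println(fs.getHomeDirectory());       // typically /user/<user.name>
        fs.close();
      }
    }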

+ 117 - 0
src/test/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java

@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.net.URL;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * The test makes sure that the NameNode detects the presence of blocks that do
+ * not have any valid replicas. In addition, it verifies that the HDFS front page displays
+ * a warning in such a case.
+ */
+public class TestMissingBlocksAlert extends TestCase {
+  
+  private static final Log LOG = 
+                           LogFactory.getLog(TestMissingBlocksAlert.class);
+  
+  public void testMissingBlocksAlert() throws IOException, 
+                                       InterruptedException {
+    
+    MiniDFSCluster cluster = null;
+    
+    try {
+      Configuration conf = new Configuration();
+      //minimize test delay
+      conf.setInt("dfs.replication.interval", 0);
+      int fileLen = 10*1024;
+
+      //start a cluster with single datanode
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster.waitActive();
+
+      DistributedFileSystem dfs = 
+                            (DistributedFileSystem) cluster.getFileSystem();
+
+      // create a normal file
+      DFSTestUtil.createFile(dfs, new Path("/testMissingBlocksAlert/file1"), 
+                             fileLen, (short)3, 0);
+
+      Path corruptFile = new Path("/testMissingBlocks/corruptFile");
+      DFSTestUtil.createFile(dfs, corruptFile, fileLen, (short)3, 0);
+
+
+      // Corrupt the block
+      String block = DFSTestUtil.getFirstBlock(dfs, corruptFile).getBlockName();
+      TestDatanodeBlockScanner.corruptReplica(block, 0);
+
+      // read the file so that the corrupt block is reported to NN
+      FSDataInputStream in = dfs.open(corruptFile); 
+      try {
+        in.readFully(new byte[fileLen]);
+      } catch (ChecksumException ignored) { // checksum error is expected.      
+      }
+      in.close();
+
+      LOG.info("Waiting for missing blocks count to increase...");
+
+      while (dfs.getMissingBlocksCount() <= 0) {
+        Thread.sleep(100);
+      }
+      assertTrue(dfs.getMissingBlocksCount() == 1);
+
+
+      // Now verify that it shows up on webui
+      URL url = new URL("http://" + conf.get("dfs.http.address") + 
+                        "/dfshealth.jsp");
+      String dfsFrontPage = DFSTestUtil.urlGet(url);
+      String warnStr = "WARNING : There are about ";
+      assertTrue("HDFS Front page does not contain expected warning", 
+                 dfsFrontPage.contains(warnStr + "1 missing blocks"));
+
+      // now do the reverse : remove the file expect the number of missing 
+      // blocks to go to zero
+
+      dfs.delete(corruptFile, true);
+
+      LOG.info("Waiting for missing blocks count to be zero...");
+      while (dfs.getMissingBlocksCount() > 0) {
+        Thread.sleep(100);
+      }
+
+      // and make sure WARNING disappears
+      // Now verify that it shows up on webui
+      dfsFrontPage = DFSTestUtil.urlGet(url);
+      assertFalse("HDFS Front page contains unexpected warning", 
+                  dfsFrontPage.contains(warnStr));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+}
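
For context, a minimal sketch (not part of this patch) of the polling pattern this test relies on: DistributedFileSystem.getMissingBlocksCount() reports how many blocks currently have no valid replica. The class name is hypothetical, and fs.default.name is assumed to point at a running HDFS instance.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    // Hypothetical example, not part of this commit; assumes fs.default.name points at HDFS.
    public class MissingBlocksProbe {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        // Poll until the NameNode reports at least one block with no valid replica,
        // the same condition the test waits for before checking the web UI warning.
        while (dfs.getMissingBlocksCount() == 0) {
          Thread.sleep(100);
        }
        System.out.println("missing blocks: " + dfs.getMissingBlocksCount());
        fs.close();
      }
    }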

+ 187 - 0
src/test/org/apache/hadoop/hdfs/TestModTime.java

@@ -0,0 +1,187 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Random;
+import java.net.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
+
+/**
+ * This class tests the modification time of files and directories in DFS.
+ * @author Dhruba Borthakur
+ */
+public class TestModTime extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 8192;
+  static final int fileSize = 16384;
+  static final int numDatanodes = 6;
+
+
+  Random myrand = new Random();
+  Path hostsFile;
+  Path excludeFile;
+
+  private void writeFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    // create and write a file that contains two blocks of data
+    FSDataOutputStream stm = fileSys.create(name, true, 
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)blockSize);
+    byte[] buffer = new byte[fileSize];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+  
+  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+
+  private void printDatanodeReport(DatanodeInfo[] info) {
+    System.out.println("-------------------------------------------------");
+    for (int i = 0; i < info.length; i++) {
+      System.out.println(info[i].getDatanodeReport());
+      System.out.println();
+    }
+  }
+
+  /**
+   * Tests modification time in DFS.
+   */
+  public void testModTime() throws IOException {
+    Configuration conf = new Configuration();
+
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    cluster.waitActive();
+    InetSocketAddress addr = new InetSocketAddress("localhost", 
+                                                   cluster.getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+    assertEquals("Number of Datanodes ", numDatanodes, info.length);
+    FileSystem fileSys = cluster.getFileSystem();
+    int replicas = numDatanodes - 1;
+    assertTrue(fileSys instanceof DistributedFileSystem);
+
+    try {
+
+     //
+     // create file and record ctime and mtime of test file
+     //
+     System.out.println("Creating testdir1 and testdir1/test1.dat.");
+     Path dir1 = new Path("testdir1");
+     Path file1 = new Path(dir1, "test1.dat");
+     writeFile(fileSys, file1, replicas);
+     FileStatus stat = fileSys.getFileStatus(file1);
+     long mtime1 = stat.getModificationTime();
+     assertTrue(mtime1 != 0);
+     //
+     // record dir times
+     //
+     stat = fileSys.getFileStatus(dir1);
+     long mdir1 = stat.getModificationTime();
+
+     //
+     // create second test file
+     //
+     System.out.println("Creating testdir1/test2.dat.");
+     Path file2 = new Path(dir1, "test2.dat");
+     writeFile(fileSys, file2, replicas);
+     stat = fileSys.getFileStatus(file2);
+
+     //
+     // verify that the modification time of the directory has been updated:
+     // creating test2.dat bumps the parent directory's mtime.
+     //
+     stat = fileSys.getFileStatus(dir1);
+     assertTrue(stat.getModificationTime() >= mdir1);
+     mdir1 = stat.getModificationTime();
+     //
+     // create another directory
+     //
+     Path dir2 = (new Path("testdir2/")).makeQualified(fileSys);
+     System.out.println("Creating testdir2 " + dir2);
+     assertTrue(fileSys.mkdirs(dir2));
+     stat = fileSys.getFileStatus(dir2);
+     long mdir2 = stat.getModificationTime();
+     //
+     // rename file1 from testdir into testdir2
+     //
+     Path newfile = new Path(dir2, "testnew.dat");
+     System.out.println("Moving " + file1 + " to " + newfile);
+     fileSys.rename(file1, newfile);
+     //
+     // verify that modification time of file1 did not change.
+     //
+     stat = fileSys.getFileStatus(newfile);
+     assertTrue(stat.getModificationTime() == mtime1);
+     //
+     // verify that the modification times of testdir1 and testdir2
+     // have changed.
+     //
+     stat = fileSys.getFileStatus(dir1);
+     assertTrue(stat.getModificationTime() != mdir1);
+     mdir1 = stat.getModificationTime();
+
+     stat = fileSys.getFileStatus(dir2);
+     assertTrue(stat.getModificationTime() != mdir2);
+     mdir2 = stat.getModificationTime();
+     //
+     // delete newfile
+     //
+     System.out.println("Deleting testdir2/testnew.dat.");
+     assertTrue(fileSys.delete(newfile, true));
+     //
+     // verify that modification time of testdir1 has not changed.
+     //
+     stat = fileSys.getFileStatus(dir1);
+     assertTrue(stat.getModificationTime() == mdir1);
+     //
+     // verify that modification time of testdir2 has changed.
+     //
+     stat = fileSys.getFileStatus(dir2);
+     assertTrue(stat.getModificationTime() != mdir2);
+     mdir2 = stat.getModificationTime();
+
+     cleanupFile(fileSys, file2);
+     cleanupFile(fileSys, dir1);
+     cleanupFile(fileSys, dir2);
+    } catch (IOException e) {
+      info = client.datanodeReport(DatanodeReportType.ALL);
+      printDatanodeReport(info);
+      throw e;
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    new TestModTime().testModTime();
+  }
+}
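
For context, a minimal sketch (not part of this patch) of the mtime semantics the test asserts: a renamed file keeps its modification time, while the affected parent directories receive new ones. The class name and paths are hypothetical; the source file and destination path's parent are assumed to exist.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical example, not part of this commit; /a/file.dat and /b are assumed to exist.
    public class ModTimeProbe {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path src = new Path("/a/file.dat");
        Path dst = new Path("/b/file.dat");
        long before = fs.getFileStatus(src).getModificationTime();
        fs.rename(src, dst);
        // The file keeps its own modification time across the rename; only the
        // source and destination parent directories receive new mtimes.
        long after = fs.getFileStatus(dst).getModificationTime();
        System.out.println("mtime unchanged: " + (before == after));
        fs.close();
      }
    }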

+ 220 - 0
src/test/org/apache/hadoop/hdfs/TestPread.java

@@ -0,0 +1,220 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+
+/**
+ * This class tests the DFS positional read functionality in a single node
+ * mini-cluster.
+ */
+public class TestPread extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 4096;
+  boolean simulatedStorage = false;
+
+  private void writeFile(FileSystem fileSys, Path name) throws IOException {
+    // create and write a file that contains twelve blocks of data
+    DataOutputStream stm = fileSys.create(name, true, 4096, (short)1,
+                                          (long)blockSize);
+    // test empty file open and read
+    stm.close();
+    FSDataInputStream in = fileSys.open(name);
+    byte[] buffer = new byte[12 * blockSize];
+    in.readFully(0, buffer, 0, 0);
+    IOException res = null;
+    try { // read beyond the end of the file
+      in.readFully(0, buffer, 0, 1);
+    } catch (IOException e) {
+      // should throw an exception
+      res = e;
+    }
+    assertTrue("Error reading beyond file boundary.", res != null);
+    in.close();
+    assertTrue("Cannot delete file", fileSys.delete(name, true));
+    
+    // now create the real file
+    stm = fileSys.create(name, true, 4096, (short)1, (long)blockSize);
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+  
+  private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
+    for (int idx = 0; idx < actual.length; idx++) {
+      assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+                        expected[from+idx]+" actual "+actual[idx],
+                        actual[idx], expected[from+idx]);
+      actual[idx] = 0;
+    }
+  }
+  
+  private void doPread(FSDataInputStream stm, long position, byte[] buffer,
+                       int offset, int length) throws IOException {
+    int nread = 0;
+    while (nread < length) {
+      int nbytes = stm.read(position+nread, buffer, offset+nread, length-nread);
+      assertTrue("Error in pread", nbytes > 0);
+      nread += nbytes;
+    }
+  }
+  
+  private void pReadFile(FileSystem fileSys, Path name) throws IOException {
+    FSDataInputStream stm = fileSys.open(name);
+    byte[] expected = new byte[12 * blockSize];
+    if (simulatedStorage) {
+      for (int i= 0; i < expected.length; i++) {  
+        expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
+      }
+    } else {
+      Random rand = new Random(seed);
+      rand.nextBytes(expected);
+    }
+    // do a sanity check. Read first 4K bytes
+    byte[] actual = new byte[4096];
+    stm.readFully(actual);
+    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
+    // now do a pread for the first 8K bytes
+    actual = new byte[8192];
+    doPread(stm, 0L, actual, 0, 8192);
+    checkAndEraseData(actual, 0, expected, "Pread Test 1");
+    // Now check to see if the normal read returns 4K-8K byte range
+    actual = new byte[4096];
+    stm.readFully(actual);
+    checkAndEraseData(actual, 4096, expected, "Pread Test 2");
+    // Now see if we can cross a single block boundary successfully
+    // read 4K bytes from blockSize - 2K offset
+    stm.readFully(blockSize - 2048, actual, 0, 4096);
+    checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 3");
+    // now see if we can cross two block boundaries successfully
+    // read blockSize + 4K bytes from blockSize - 2K offset
+    actual = new byte[blockSize + 4096];
+    stm.readFully(blockSize - 2048, actual);
+    checkAndEraseData(actual, (blockSize - 2048), expected, "Pread Test 4");
+    // now see if we can cross two block boundaries that are not cached
+    // read blockSize + 4K bytes from 10*blockSize - 2K offset
+    actual = new byte[blockSize + 4096];
+    stm.readFully(10 * blockSize - 2048, actual);
+    checkAndEraseData(actual, (10 * blockSize - 2048), expected, "Pread Test 5");
+    // now check that even after all these preads, we can still read
+    // bytes 8K-12K
+    actual = new byte[4096];
+    stm.readFully(actual);
+    checkAndEraseData(actual, 8192, expected, "Pread Test 6");
+    // done
+    stm.close();
+    // check block location caching
+    stm = fileSys.open(name);
+    stm.readFully(1, actual, 0, 4096);
+    stm.readFully(4*blockSize, actual, 0, 4096);
+    stm.readFully(7*blockSize, actual, 0, 4096);
+    actual = new byte[3*4096];
+    stm.readFully(0*blockSize, actual, 0, 3*4096);
+    checkAndEraseData(actual, 0, expected, "Pread Test 7");
+    actual = new byte[8*4096];
+    stm.readFully(3*blockSize, actual, 0, 8*4096);
+    checkAndEraseData(actual, 3*blockSize, expected, "Pread Test 8");
+    // read the tail
+    stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize/2);
+    IOException res = null;
+    try { // read beyond the end of the file
+      stm.readFully(11*blockSize+blockSize/2, actual, 0, blockSize);
+    } catch (IOException e) {
+      // should throw an exception
+      res = e;
+    }
+    assertTrue("Error reading beyond file boundary.", res != null);
+    
+    stm.close();
+  }
+  
+  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    assertTrue(fileSys.delete(name, true));
+    assertTrue(!fileSys.exists(name));
+  }
+  
+  /**
+   * Tests positional read in DFS.
+   */
+  public void testPreadDFS() throws IOException {
+    dfsPreadTest(false); //normal pread
+    dfsPreadTest(true); //trigger read code path without transferTo.
+  }
+  
+  private void dfsPreadTest(boolean disableTransferTo) throws IOException {
+    Configuration conf = new Configuration();
+    conf.setLong("dfs.block.size", 4096);
+    conf.setLong("dfs.read.prefetch.size", 4096);
+    if (simulatedStorage) {
+      conf.setBoolean("dfs.datanode.simulateddatastorage", true);
+    }
+    if (disableTransferTo) {
+      conf.setBoolean("dfs.datanode.transferTo.allowed", false);
+    }
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    FileSystem fileSys = cluster.getFileSystem();
+    try {
+      Path file1 = new Path("preadtest.dat");
+      writeFile(fileSys, file1);
+      pReadFile(fileSys, file1);
+      cleanupFile(fileSys, file1);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+  
+  public void testPreadDFSSimulated() throws IOException {
+    simulatedStorage = true;
+    testPreadDFS();
+    simulatedStorage = false;
+  }
+  
+  /**
+   * Tests positional read in LocalFS.
+   */
+  public void testPreadLocalFS() throws IOException {
+    Configuration conf = new Configuration();
+    FileSystem fileSys = FileSystem.getLocal(conf);
+    try {
+      Path file1 = new Path("build/test/data", "preadtest.dat");
+      writeFile(fileSys, file1);
+      pReadFile(fileSys, file1);
+      cleanupFile(fileSys, file1);
+    } finally {
+      fileSys.close();
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    new TestPread().testPreadDFS();
+  }
+}
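
For context, a minimal sketch (not part of this patch) of the positional-read API the test exercises: FSDataInputStream.read(position, buf, off, len) reads at an absolute offset without moving the stream's current position, so it can be interleaved with sequential readFully() calls. The class name and path are hypothetical; a file of at least a few blocks is assumed to exist there.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical example, not part of this commit.
    public class PreadExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        FSDataInputStream in = fs.open(new Path("/data/preadtest.dat"));
        byte[] buf = new byte[4096];
        // Positional read: fetches bytes at an absolute offset without moving the
        // stream's current position, so it can be interleaved with readFully().
        int n = in.read(8192L, buf, 0, buf.length);
        System.out.println("pread returned " + n + " bytes at offset 8192");
        in.readFully(buf);   // sequential read still starts at offset 0
        in.close();
        fs.close();
      }
    }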

+ 620 - 0
src/test/org/apache/hadoop/hdfs/TestQuota.java

@@ -0,0 +1,620 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+
+import junit.framework.TestCase;
+
+/** A class for testing quota-related commands */
+public class TestQuota extends TestCase {
+  
+  private void runCommand(DFSAdmin admin, boolean expectError, String... args) 
+                         throws Exception {
+    runCommand(admin, args, expectError);
+  }
+  
+  private void runCommand(DFSAdmin admin, String args[], boolean expectError)
+  throws Exception {
+    int val = admin.run(args);
+    if (expectError) {
+      assertEquals(val, -1);
+    } else {
+      assertTrue(val>=0);
+    }
+  }
+  
+  /** Test quota related commands: 
+   *    setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count 
+   */
+  public void testQuotaCommands() throws Exception {
+    final Configuration conf = new Configuration();
+    // set a smaller block size so that we can test with smaller 
+    // Space quotas
+    conf.set("dfs.block.size", "512");
+    conf.setBoolean("dfs.support.append", true);
+    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    final FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+                fs instanceof DistributedFileSystem);
+    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
+    DFSAdmin admin = new DFSAdmin(conf);
+    
+    try {
+      final int fileLen = 1024;
+      final short replication = 5;
+      final long spaceQuota = fileLen * replication * 15 / 8;
+
+      // 1: create a directory /test and set its quota to be 3
+      final Path parent = new Path("/test");
+      assertTrue(dfs.mkdirs(parent));
+      String[] args = new String[]{"-setQuota", "3", parent.toString()};
+      runCommand(admin, args, false);
+
+      //try setting space quota with a 'binary prefix'
+      runCommand(admin, false, "-setSpaceQuota", "2t", parent.toString());
+      assertEquals(2L<<40, dfs.getContentSummary(parent).getSpaceQuota());
+      
+      // set diskspace quota to spaceQuota (fileLen * replication * 15 / 8 bytes)
+      runCommand(admin, false, "-setSpaceQuota", 
+                 Long.toString(spaceQuota), parent.toString());
+      
+      // 2: create directory /test/data0
+      final Path childDir0 = new Path(parent, "data0");
+      assertTrue(dfs.mkdirs(childDir0));
+
+      // 3: create a file /test/datafile0
+      final Path childFile0 = new Path(parent, "datafile0");
+      DFSTestUtil.createFile(fs, childFile0, fileLen, replication, 0);
+      
+      // 4: count -q /test
+      ContentSummary c = dfs.getContentSummary(parent);
+      assertEquals(c.getFileCount()+c.getDirectoryCount(), 3);
+      assertEquals(c.getQuota(), 3);
+      assertEquals(c.getSpaceConsumed(), fileLen*replication);
+      assertEquals(c.getSpaceQuota(), spaceQuota);
+      
+      // 5: count -q /test/data0
+      c = dfs.getContentSummary(childDir0);
+      assertEquals(c.getFileCount()+c.getDirectoryCount(), 1);
+      assertEquals(c.getQuota(), -1);
+      // check disk space consumed
+      c = dfs.getContentSummary(parent);
+      assertEquals(c.getSpaceConsumed(), fileLen*replication);
+
+      // 6: create a directory /test/data1
+      final Path childDir1 = new Path(parent, "data1");
+      boolean hasException = false;
+      try {
+        assertFalse(dfs.mkdirs(childDir1));
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+      
+      OutputStream fout;
+      
+      // 7: create a file /test/datafile1
+      final Path childFile1 = new Path(parent, "datafile1");
+      hasException = false;
+      try {
+        fout = dfs.create(childFile1);
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+      
+      // 8: clear quota /test
+      runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
+      c = dfs.getContentSummary(parent);
+      assertEquals(c.getQuota(), -1);
+      assertEquals(c.getSpaceQuota(), spaceQuota);
+      
+      // 9: clear quota /test/data0
+      runCommand(admin, new String[]{"-clrQuota", childDir0.toString()}, false);
+      c = dfs.getContentSummary(childDir0);
+      assertEquals(c.getQuota(), -1);
+      
+      // 10: create a file /test/datafile1
+      fout = dfs.create(childFile1, replication);
+      
+      // 10.s: but writing fileLen bytes should result in a quota exception
+      hasException = false;
+      try {
+        fout.write(new byte[fileLen]);
+        fout.close();
+      } catch (QuotaExceededException e) {
+        hasException = true;
+        IOUtils.closeStream(fout);
+      }
+      assertTrue(hasException);
+      
+      //delete the file
+      dfs.delete(childFile1, false);
+      
+      // 9.s: clear diskspace quota
+      runCommand(admin, false, "-clrSpaceQuota", parent.toString());
+      c = dfs.getContentSummary(parent);
+      assertEquals(c.getQuota(), -1);
+      assertEquals(c.getSpaceQuota(), -1);       
+      
+      // now creating childFile1 should succeed
+      DFSTestUtil.createFile(dfs, childFile1, fileLen, replication, 0);
+      
+      // 11: set the quota of /test to be 1
+      args = new String[]{"-setQuota", "1", parent.toString()};
+      runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota",  // for space quota
+                 Integer.toString(fileLen), args[2]);
+      
+      // 12: set the quota of /test/data0 to be 1
+      args = new String[]{"-setQuota", "1", childDir0.toString()};
+      runCommand(admin, args, false);
+      
+      // 13: not able to create a directory under data0
+      hasException = false;
+      try {
+        assertFalse(dfs.mkdirs(new Path(childDir0, "in")));
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+      c = dfs.getContentSummary(childDir0);
+      assertEquals(c.getDirectoryCount()+c.getFileCount(), 1);
+      assertEquals(c.getQuota(), 1);
+      
+      // 14a: set quota on a non-existent directory
+      Path nonExistentPath = new Path("/test1");
+      assertFalse(dfs.exists(nonExistentPath));
+      args = new String[]{"-setQuota", "1", nonExistentPath.toString()};
+      runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota", "1g", // for space quota
+                 nonExistentPath.toString());
+      
+      // 14b: set quota on a file
+      assertTrue(dfs.isFile(childFile0));
+      args[1] = childFile0.toString();
+      runCommand(admin, args, true);
+      // same for space quota
+      runCommand(admin, true, "-setSpaceQuota", "1t", args[1]);
+      
+      // 15a: clear quota on a file
+      args[0] = "-clrQuota";
+      runCommand(admin, args, true);
+      runCommand(admin, true, "-clrSpaceQuota", args[1]);
+      
+      // 15b: clear quota on a non-existent directory
+      args[1] = nonExistentPath.toString();
+      runCommand(admin, args, true);
+      runCommand(admin, true, "-clrSpaceQuota", args[1]);
+      
+      // 16a: set the quota of /test to be 0
+      args = new String[]{"-setQuota", "0", parent.toString()};
+      runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota", "0", args[2]);
+      
+      // 16b: set the quota of /test to be -1
+      args[1] = "-1";
+      runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
+      
+      // 16c: set the quota of /test to be Long.MAX_VALUE+1
+      args[1] = String.valueOf(Long.MAX_VALUE+1L);
+      runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
+      
+      // 16d: set the quota of /test to be a non integer
+      args[1] = "33aa1.5";
+      runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
+      
+      // 16e: set space quota with a value larger than Long.MAX_VALUE
+      runCommand(admin, true, "-setSpaceQuota", 
+                 (Long.MAX_VALUE/1024/1024 + 1024) + "m", args[2]);
+      
+      // 17:  setQuota by a non-administrator
+      UnixUserGroupInformation.saveToConf(conf, 
+          UnixUserGroupInformation.UGI_PROPERTY_NAME, 
+          new UnixUserGroupInformation(new String[]{"userxx\n", "groupyy\n"}));
+      DFSAdmin userAdmin = new DFSAdmin(conf);
+      args[1] = "100";
+      runCommand(userAdmin, args, true);
+      runCommand(userAdmin, true, "-setSpaceQuota", "1g", args[2]);
+      
+      // 18: clrQuota by a non-administrator
+      args = new String[] {"-clrQuota", parent.toString()};
+      runCommand(userAdmin, args, true);
+      runCommand(userAdmin, true, "-clrSpaceQuota",  args[1]);      
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  /** Test commands that change the size of the name space:
+   *  mkdirs, rename, and delete */
+  public void testNamespaceCommands() throws Exception {
+    final Configuration conf = new Configuration();
+    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    final FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+                fs instanceof DistributedFileSystem);
+    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
+    
+    try {
+      // 1: create directory /nqdir0/qdir1/qdir20/nqdir30
+      assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")));
+
+      // 2: set the quota of /nqdir0/qdir1 to be 6
+      final Path quotaDir1 = new Path("/nqdir0/qdir1");
+      dfs.setQuota(quotaDir1, 6, FSConstants.QUOTA_DONT_SET);
+      ContentSummary c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getDirectoryCount(), 3);
+      assertEquals(c.getQuota(), 6);
+
+      // 3: set the quota of /nqdir0/qdir1/qdir20 to be 7
+      final Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
+      dfs.setQuota(quotaDir2, 7, FSConstants.QUOTA_DONT_SET);
+      c = dfs.getContentSummary(quotaDir2);
+      assertEquals(c.getDirectoryCount(), 2);
+      assertEquals(c.getQuota(), 7);
+
+      // 4: Create directory /nqdir0/qdir1/qdir21 and set its quota to 2
+      final Path quotaDir3 = new Path("/nqdir0/qdir1/qdir21");
+      assertTrue(dfs.mkdirs(quotaDir3));
+      dfs.setQuota(quotaDir3, 2, FSConstants.QUOTA_DONT_SET);
+      c = dfs.getContentSummary(quotaDir3);
+      assertEquals(c.getDirectoryCount(), 1);
+      assertEquals(c.getQuota(), 2);
+
+      // 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
+      Path tempPath = new Path(quotaDir3, "nqdir32");
+      assertTrue(dfs.mkdirs(tempPath));
+      c = dfs.getContentSummary(quotaDir3);
+      assertEquals(c.getDirectoryCount(), 2);
+      assertEquals(c.getQuota(), 2);
+
+      // 6: Create directory /nqdir0/qdir1/qdir21/nqdir33
+      tempPath = new Path(quotaDir3, "nqdir33");
+      boolean hasException = false;
+      try {
+        assertFalse(dfs.mkdirs(tempPath));
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+      c = dfs.getContentSummary(quotaDir3);
+      assertEquals(c.getDirectoryCount(), 2);
+      assertEquals(c.getQuota(), 2);
+
+      // 7: Create directory /nqdir0/qdir1/qdir20/nqdir31
+      tempPath = new Path(quotaDir2, "nqdir31");
+      assertTrue(dfs.mkdirs(tempPath));
+      c = dfs.getContentSummary(quotaDir2);
+      assertEquals(c.getDirectoryCount(), 3);
+      assertEquals(c.getQuota(), 7);
+      c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getDirectoryCount(), 6);
+      assertEquals(c.getQuota(), 6);
+
+      // 8: Create directory /nqdir0/qdir1/qdir20/nqdir33
+      tempPath = new Path(quotaDir2, "nqdir33");
+      hasException = false;
+      try {
+        assertFalse(dfs.mkdirs(tempPath));
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+
+      // 9: Move /nqdir0/qdir1/qdir21/nqdir32 /nqdir0/qdir1/qdir20/nqdir30
+      tempPath = new Path(quotaDir2, "nqdir30");
+      dfs.rename(new Path(quotaDir3, "nqdir32"), tempPath);
+      c = dfs.getContentSummary(quotaDir2);
+      assertEquals(c.getDirectoryCount(), 4);
+      assertEquals(c.getQuota(), 7);
+      c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getDirectoryCount(), 6);
+      assertEquals(c.getQuota(), 6);
+
+      // 10: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21
+      hasException = false;
+      try {
+        assertFalse(dfs.rename(tempPath, quotaDir3));
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+      assertTrue(dfs.exists(tempPath));
+      assertFalse(dfs.exists(new Path(quotaDir3, "nqdir30")));
+      
+      // 10.a: Rename /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/nqdir32
+      hasException = false;
+      try {
+        assertFalse(dfs.rename(tempPath, new Path(quotaDir3, "nqdir32")));
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+      assertTrue(dfs.exists(tempPath));
+      assertFalse(dfs.exists(new Path(quotaDir3, "nqdir32")));
+
+      // 11: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0
+      assertTrue(dfs.rename(tempPath, new Path("/nqdir0")));
+      c = dfs.getContentSummary(quotaDir2);
+      assertEquals(c.getDirectoryCount(), 2);
+      assertEquals(c.getQuota(), 7);
+      c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getDirectoryCount(), 4);
+      assertEquals(c.getQuota(), 6);
+
+      // 12: Create directory /nqdir0/nqdir30/nqdir33
+      assertTrue(dfs.mkdirs(new Path("/nqdir0/nqdir30/nqdir33")));
+
+      // 13: Move /nqdir0/nqdir30 /nqdir0/qdir1/qdir20/qdir30
+      hasException = false;
+      try {
+        assertFalse(dfs.rename(new Path("/nqdir0/nqdir30"), tempPath));
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+
+      // 14: Move /nqdir0/qdir1/qdir21 /nqdir0/qdir1/qdir20
+      assertTrue(dfs.rename(quotaDir3, quotaDir2));
+      c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getDirectoryCount(), 4);
+      assertEquals(c.getQuota(), 6);
+      c = dfs.getContentSummary(quotaDir2);
+      assertEquals(c.getDirectoryCount(), 3);
+      assertEquals(c.getQuota(), 7);
+      tempPath = new Path(quotaDir2, "qdir21");
+      c = dfs.getContentSummary(tempPath);
+      assertEquals(c.getDirectoryCount(), 1);
+      assertEquals(c.getQuota(), 2);
+
+      // 15: Delete /nqdir0/qdir1/qdir20/qdir21
+      dfs.delete(tempPath, true);
+      c = dfs.getContentSummary(quotaDir2);
+      assertEquals(c.getDirectoryCount(), 2);
+      assertEquals(c.getQuota(), 7);
+      c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getDirectoryCount(), 3);
+      assertEquals(c.getQuota(), 6);
+
+      // 16: Move /nqdir0/qdir30 /nqdir0/qdir1/qdir20
+      assertTrue(dfs.rename(new Path("/nqdir0/nqdir30"), quotaDir2));
+      c = dfs.getContentSummary(quotaDir2);
+      assertEquals(c.getDirectoryCount(), 5);
+      assertEquals(c.getQuota(), 7);
+      c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getDirectoryCount(), 6);
+      assertEquals(c.getQuota(), 6);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  /**
+   * Test HDFS operations that change the disk space consumed by a directory
+   * tree, namely create, rename, delete, append, and setReplication.
+   * 
+   * This is based on testNamespaceCommands() above.
+   */
+  public void testSpaceCommands() throws Exception {
+    final Configuration conf = new Configuration();
+    // set a smaller block size so that we can test with smaller 
+    // diskspace quotas
+    conf.set("dfs.block.size", "512");
+    conf.setBoolean("dfs.support.append", true);
+    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    final FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+                fs instanceof DistributedFileSystem);
+    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
+
+    try {
+      int fileLen = 1024;
+      short replication = 3;
+      int fileSpace = fileLen * replication;
+      
+      // create directory /nqdir0/qdir1/qdir20/nqdir30
+      assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")));
+
+      // set the quota of /nqdir0/qdir1 to 4 * fileSpace 
+      final Path quotaDir1 = new Path("/nqdir0/qdir1");
+      dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 4 * fileSpace);
+      ContentSummary c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getSpaceQuota(), 4 * fileSpace);
+      
+      // set the quota of /nqdir0/qdir1/qdir20 to 6 * fileSpace 
+      final Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20");
+      dfs.setQuota(quotaDir20, FSConstants.QUOTA_DONT_SET, 6 * fileSpace);
+      c = dfs.getContentSummary(quotaDir20);
+      assertEquals(c.getSpaceQuota(), 6 * fileSpace);
+
+
+      // Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
+      final Path quotaDir21 = new Path("/nqdir0/qdir1/qdir21");
+      assertTrue(dfs.mkdirs(quotaDir21));
+      dfs.setQuota(quotaDir21, FSConstants.QUOTA_DONT_SET, 2 * fileSpace);
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceQuota(), 2 * fileSpace);
+
+      // 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
+      Path tempPath = new Path(quotaDir21, "nqdir32");
+      assertTrue(dfs.mkdirs(tempPath));
+      
+      // create a file under nqdir32/fileDir
+      DFSTestUtil.createFile(dfs, new Path(tempPath, "fileDir/file1"), fileLen, 
+                             replication, 0);
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceConsumed(), fileSpace);
+      
+      // Create a larger file /nqdir0/qdir1/qdir21/nqdir33/
+      boolean hasException = false;
+      try {
+        DFSTestUtil.createFile(dfs, new Path(quotaDir21, "nqdir33/file2"), 
+                               2*fileLen, replication, 0);
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+      // delete nqdir33
+      assertTrue(dfs.delete(new Path(quotaDir21, "nqdir33"), true));
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceConsumed(), fileSpace);
+      assertEquals(c.getSpaceQuota(), 2*fileSpace);
+
+      // Verify space before the move:
+      c = dfs.getContentSummary(quotaDir20);
+      assertEquals(c.getSpaceConsumed(), 0);
+      
+      // Move /nqdir0/qdir1/qdir21/nqdir32 /nqdir0/qdir1/qdir20/nqdir30
+      Path dstPath = new Path(quotaDir20, "nqdir30");
+      Path srcPath = new Path(quotaDir21, "nqdir32");
+      assertTrue(dfs.rename(srcPath, dstPath));
+      
+      // verify space after the move
+      c = dfs.getContentSummary(quotaDir20);
+      assertEquals(c.getSpaceConsumed(), fileSpace);
+      // verify space for its parent
+      c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getSpaceConsumed(), fileSpace);
+      // verify space for source for the move
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceConsumed(), 0);
+      
+      final Path file2 = new Path(dstPath, "fileDir/file2");
+      int file2Len = 2 * fileLen;
+      // create a larger file under /nqdir0/qdir1/qdir20/nqdir30
+      DFSTestUtil.createFile(dfs, file2, file2Len, replication, 0);
+      
+      c = dfs.getContentSummary(quotaDir20);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceConsumed(), 0);
+      
+      // Reverse: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/
+      hasException = false;
+      try {
+        assertFalse(dfs.rename(dstPath, srcPath));
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+      
+      // make sure no intermediate directories left by failed rename
+      assertFalse(dfs.exists(srcPath));
+      // directory should exist
+      assertTrue(dfs.exists(dstPath));
+            
+      // verify space after the failed move
+      c = dfs.getContentSummary(quotaDir20);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceConsumed(), 0);
+      
+      // Test Append :
+      
+      // verify space quota
+      c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getSpaceQuota(), 4 * fileSpace);
+      
+      // verify space before append;
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
+      
+      OutputStream out = dfs.append(file2);
+      // appending 1 fileLen should succeed
+      out.write(new byte[fileLen]);
+      out.close();
+      
+      file2Len += fileLen; // after append
+      
+      // verify space after append;
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 4 * fileSpace);
+      
+      // now increase the quota for quotaDir1
+      dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 5 * fileSpace);
+      // Now, appending more than 1 fileLen should result in an error
+      out = dfs.append(file2);
+      hasException = false;
+      try {
+        out.write(new byte[fileLen + 1024]);
+        out.flush();
+        out.close();
+      } catch (QuotaExceededException e) {
+        hasException = true;
+        IOUtils.closeStream(out);
+      }
+      assertTrue(hasException);
+      
+      file2Len += fileLen; // after partial append
+      
+      // verify space after partial append
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 5 * fileSpace);
+      
+      // Test set replication :
+      
+      // first reduce the replication
+      dfs.setReplication(file2, (short)(replication-1));
+      
+      // verify that space is reduced by file2Len
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
+      
+      // now try to increase the replication and expect an error.
+      hasException = false;
+      try {
+        dfs.setReplication(file2, (short)(replication+1));
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+
+      // verify space consumed remains unchanged.
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
+      
+      // now increase the quota for quotaDir1 and quotaDir20
+      dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 10 * fileSpace);
+      dfs.setQuota(quotaDir20, FSConstants.QUOTA_DONT_SET, 10 * fileSpace);
+      
+      // then increasing replication should be ok.
+      dfs.setReplication(file2, (short)(replication+1));
+      // verify increase in space
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Len);
+      
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}
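
For context, a minimal sketch (not part of this patch) of the programmatic quota API behind the dfsadmin commands this test drives: DistributedFileSystem.setQuota() sets the namespace and/or diskspace quota, and getContentSummary() reports both quotas and current usage. The class name and path are hypothetical; fs.default.name is assumed to point at HDFS and /test is assumed to exist.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.FSConstants;

    // Hypothetical example, not part of this commit.
    public class QuotaProbe {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        Path dir = new Path("/test");
        // Limit /test to 3 namespace entries; leave the diskspace quota untouched.
        dfs.setQuota(dir, 3, FSConstants.QUOTA_DONT_SET);
        ContentSummary c = dfs.getContentSummary(dir);
        System.out.println("nsQuota=" + c.getQuota()
            + " nsUsed=" + (c.getFileCount() + c.getDirectoryCount())
            + " spaceQuota=" + c.getSpaceQuota()
            + " spaceConsumed=" + c.getSpaceConsumed());
        dfs.close();
      }
    }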

+ 310 - 0
src/test/org/apache/hadoop/hdfs/TestRenameWhileOpen.java

@@ -0,0 +1,310 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.log4j.Level;
+
+public class TestRenameWhileOpen extends junit.framework.TestCase {
+  {
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ALL);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ALL);
+  }
+
+  /**
+   * open /user/dir1/file1 /user/dir2/file2
+   * mkdir /user/dir3
+   * move /user/dir1 /user/dir3
+   */
+  public void testWhileOpenRenameParent() throws IOException {
+    Configuration conf = new Configuration();
+    final int MAX_IDLE_TIME = 2000; // 2s
+    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
+
+    // create cluster
+    System.out.println("Test 1*****************************");
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = null;
+    try {
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      final int nnport = cluster.getNameNodePort();
+
+      // create file1.
+      Path dir1 = new Path("/user/a+b/dir1");
+      Path file1 = new Path(dir1, "file1");
+      FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
+      System.out.println("testFileCreationDeleteParent: "
+          + "Created file " + file1);
+      TestFileCreation.writeFile(stm1);
+      stm1.sync();
+
+      // create file2.
+      Path dir2 = new Path("/user/dir2");
+      Path file2 = new Path(dir2, "file2");
+      FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
+      System.out.println("testFileCreationDeleteParent: "
+          + "Created file " + file2);
+      TestFileCreation.writeFile(stm2);
+      stm2.sync();
+
+      // move dir1 while file1 is open
+      Path dir3 = new Path("/user/dir3");
+      fs.mkdirs(dir3);
+      fs.rename(dir1, dir3);
+
+      // restart cluster with the same namenode port as before.
+      // This ensures that leases are persisted in fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+
+      // restart cluster yet again. This triggers the code to read in
+      // persistent leases from fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(5000);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+
+      Path newfile = new Path("/user/dir3/dir1", "file1");
+      assertTrue(!fs.exists(file1));
+      assertTrue(fs.exists(file2));
+      assertTrue(fs.exists(newfile));
+      TestFileCreation.checkFullFile(fs, newfile);
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * open /user/dir1/file1 /user/dir2/file2
+   * move /user/dir1 /user/dir3
+   */
+  public void testWhileOpenRenameParentToNonexistentDir() throws IOException {
+    Configuration conf = new Configuration();
+    final int MAX_IDLE_TIME = 2000; // 2s
+    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
+    System.out.println("Test 2************************************");
+
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = null;
+    try {
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      final int nnport = cluster.getNameNodePort();
+
+      // create file1.
+      Path dir1 = new Path("/user/dir1");
+      Path file1 = new Path(dir1, "file1");
+      FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
+      System.out.println("testFileCreationDeleteParent: "
+          + "Created file " + file1);
+      TestFileCreation.writeFile(stm1);
+      stm1.sync();
+
+      // create file2.
+      Path dir2 = new Path("/user/dir2");
+      Path file2 = new Path(dir2, "file2");
+      FSDataOutputStream stm2 = TestFileCreation.createFile(fs, file2, 1);
+      System.out.println("testFileCreationDeleteParent: "
+          + "Created file " + file2);
+      TestFileCreation.writeFile(stm2);
+      stm2.sync();
+
+      // move dir1 while file1 is open
+      Path dir3 = new Path("/user/dir3");
+      fs.rename(dir1, dir3);
+
+      // restart cluster with the same namenode port as before.
+      // This ensures that leases are persisted in fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+
+      // restart cluster yet again. This triggers the code to read in
+      // persistent leases from fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(5000);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+
+      Path newfile = new Path("/user/dir3", "file1");
+      assertTrue(!fs.exists(file1));
+      assertTrue(fs.exists(file2));
+      assertTrue(fs.exists(newfile));
+      TestFileCreation.checkFullFile(fs, newfile);
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * open /user/dir1/file1 
+   * mkdir /user/dir2
+   * move /user/dir1/file1 /user/dir2/
+   */
+  public void testWhileOpenRenameToExistentDirectory() throws IOException {
+    Configuration conf = new Configuration();
+    final int MAX_IDLE_TIME = 2000; // 2s
+    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
+    System.out.println("Test 3************************************");
+
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = null;
+    try {
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      final int nnport = cluster.getNameNodePort();
+
+      // create file1.
+      Path dir1 = new Path("/user/dir1");
+      Path file1 = new Path(dir1, "file1");
+      FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
+      System.out.println("testFileCreationDeleteParent: " +
+                         "Created file " + file1);
+      TestFileCreation.writeFile(stm1);
+      stm1.sync();
+
+      Path dir2 = new Path("/user/dir2");
+      fs.mkdirs(dir2);
+
+      fs.rename(file1, dir2);
+
+      // restart cluster with the same namenode port as before.
+      // This ensures that leases are persisted in fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+
+      // restart cluster yet again. This triggers the code to read in
+      // persistent leases from fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(5000);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+
+      Path newfile = new Path("/user/dir2", "file1");
+      assertTrue(!fs.exists(file1));
+      assertTrue(fs.exists(newfile));
+      TestFileCreation.checkFullFile(fs, newfile);
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * open /user/dir1/file1 
+   * move /user/dir1/file1 /user/dir2/
+   */
+  public void testWhileOpenRenameToNonExistentDirectory() throws IOException {
+    Configuration conf = new Configuration();
+    final int MAX_IDLE_TIME = 2000; // 2s
+    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    conf.setInt("dfs.safemode.threshold.pct", 1);
+    conf.setBoolean("dfs.support.append", true);
+    System.out.println("Test 4************************************");
+
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = null;
+    try {
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+      final int nnport = cluster.getNameNodePort();
+
+      // create file1.
+      Path dir1 = new Path("/user/dir1");
+      Path file1 = new Path(dir1, "file1");
+      FSDataOutputStream stm1 = TestFileCreation.createFile(fs, file1, 1);
+      System.out.println("testFileCreationDeleteParent: "
+          + "Created file " + file1);
+      TestFileCreation.writeFile(stm1);
+      stm1.sync();
+
+      Path dir2 = new Path("/user/dir2");
+
+      fs.rename(file1, dir2);
+
+      // restart cluster with the same namenode port as before.
+      // This ensures that leases are persisted in fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+
+      // restart cluster yet again. This triggers the code to read in
+      // persistent leases from fsimage.
+      cluster.shutdown();
+      try {Thread.sleep(5000);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
+                                   null, null, null);
+      cluster.waitActive();
+      fs = cluster.getFileSystem();
+
+      Path newfile = new Path("/user", "dir2");
+      assertTrue(!fs.exists(file1));
+      assertTrue(fs.exists(newfile));
+      TestFileCreation.checkFullFile(fs, newfile);
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+}
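
For context, a minimal sketch (not part of this patch) of the client-side sequence these tests set up before restarting the NameNode: write to an open file, sync() it so the data reaches the datanodes, then rename the parent while the lease is still held. The class name and paths are hypothetical; /user/dir1 is assumed to exist, and the tests additionally enable dfs.support.append on the cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Hypothetical example, not part of this commit.
    public class RenameWhileOpenProbe {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/user/dir1/file1");
        FSDataOutputStream out = fs.create(file);
        out.writeBytes("partially written data");
        out.sync();   // flush to the datanodes; the client still holds the lease
        // Rename the parent while the file is open for write; the tests then restart
        // the NameNode and check that the persisted lease follows the file.
        fs.rename(new Path("/user/dir1"), new Path("/user/dir3"));
        // The stream is intentionally left open here, mirroring the tests.
      }
    }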

+ 453 - 0
src/test/org/apache/hadoop/hdfs/TestReplication.java

@@ -0,0 +1,453 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Iterator;
+import java.util.Random;
+import java.net.*;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.BlockLocation;
+
+/**
+ * This class tests the replication of a DFS file.
+ */
+public class TestReplication extends TestCase {
+  private static final long seed = 0xDEADBEEFL;
+  private static final int blockSize = 8192;
+  private static final int fileSize = 16384;
+  private static final String racks[] = new String[] {
+    "/d1/r1", "/d1/r1", "/d1/r2", "/d1/r2", "/d1/r2", "/d2/r3", "/d2/r3"
+  };
+  private static final int numDatanodes = racks.length;
+  private static final Log LOG = LogFactory.getLog(
+                                       "org.apache.hadoop.hdfs.TestReplication");
+
+  private void writeFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    // create and write a file that contains three blocks of data
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)blockSize);
+    byte[] buffer = new byte[fileSize];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+  
+  /* check replica placement: at least two replicas share a rack and at least two are on different racks */
+  private void checkFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    Configuration conf = fileSys.getConf();
+    ClientProtocol namenode = DFSClient.createNamenode(conf);
+      
+    waitForBlockReplication(name.toString(), namenode, 
+                            Math.min(numDatanodes, repl), -1);
+    
+    LocatedBlocks locations = namenode.getBlockLocations(name.toString(),0,
+                                                         Long.MAX_VALUE);
+    FileStatus stat = fileSys.getFileStatus(name);
+    BlockLocation[] blockLocations = fileSys.getFileBlockLocations(stat,0L,
+                                                         Long.MAX_VALUE);
+    // verify that rack locations match
+    assertTrue(blockLocations.length == locations.locatedBlockCount());
+    for (int i = 0; i < blockLocations.length; i++) {
+      LocatedBlock blk = locations.get(i);
+      DatanodeInfo[] datanodes = blk.getLocations();
+      String[] topologyPaths = blockLocations[i].getTopologyPaths();
+      assertTrue(topologyPaths.length == datanodes.length);
+      for (int j = 0; j < topologyPaths.length; j++) {
+        boolean found = false;
+        for (int k = 0; k < racks.length; k++) {
+          if (topologyPaths[j].startsWith(racks[k])) {
+            found = true;
+            break;
+          }
+        }
+        assertTrue(found);
+      }
+    }
+
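+    // Verify the rack spread of the replicas: for blocks with more than two
+    // replicas, expect at least one pair of replicas on the same rack and at
+    // least one pair on different racks; for exactly two replicas, only check
+    // that they are on different racks.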
+    boolean isOnSameRack = true, isNotOnSameRack = true;
+    for (LocatedBlock blk : locations.getLocatedBlocks()) {
+      DatanodeInfo[] datanodes = blk.getLocations();
+      if (datanodes.length <= 1) break;
+      if (datanodes.length == 2) {
+        isNotOnSameRack = !(datanodes[0].getNetworkLocation().equals(
+                                                                     datanodes[1].getNetworkLocation()));
+        break;
+      }
+      isOnSameRack = false;
+      isNotOnSameRack = false;
+      for (int i = 0; i < datanodes.length-1; i++) {
+        LOG.info("datanode "+ i + ": "+ datanodes[i].getName());
+        boolean onRack = false;
+        for( int j=i+1; j<datanodes.length; j++) {
+           if( datanodes[i].getNetworkLocation().equals(
+            datanodes[j].getNetworkLocation()) ) {
+             onRack = true;
+           }
+        }
+        if (onRack) {
+          isOnSameRack = true;
+        }
+        if (!onRack) {
+          isNotOnSameRack = true;                      
+        }
+        if (isOnSameRack && isNotOnSameRack) break;
+      }
+      if (!isOnSameRack || !isNotOnSameRack) break;
+    }
+    assertTrue(isOnSameRack);
+    assertTrue(isNotOnSameRack);
+  }
+  
+  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+
+  /* 
+   * Test if Datanode reports bad blocks during replication request
+   */
+  public void testBadBlockReportOnTransfer() throws Exception {
+    Configuration conf = new Configuration();
+    FileSystem fs = null;
+    DFSClient dfsClient = null;
+    LocatedBlocks blocks = null;
+    int replicaCount = 0;
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                              cluster.getNameNodePort()), conf);
+  
+    // Create file with replication factor of 1
+    Path file1 = new Path("/tmp/testBadBlockReportOnTransfer/file1");
+    DFSTestUtil.createFile(fs, file1, 1024, (short)1, 0);
+    DFSTestUtil.waitReplication(fs, file1, (short)1);
+  
+    // Corrupt the block belonging to the created file
+    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();
+    cluster.corruptBlockOnDataNodes(block);
+  
+    // Increase replication factor, this should invoke transfer request
+    // Receiving datanode fails on checksum and reports it to namenode
+    fs.setReplication(file1, (short)2);
+  
+    // Now get block details and check if the block is corrupt
+    blocks = dfsClient.namenode.
+              getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+    while (!blocks.get(0).isCorrupt()) {
+      try {
+        LOG.info("Waiting until block is marked as corrupt...");
+        Thread.sleep(1000);
+      } catch (InterruptedException ie) {
+      }
+      blocks = dfsClient.namenode.
+                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+    }
+    replicaCount = blocks.get(0).getLocations().length;
+    assertTrue(replicaCount == 1);
+    cluster.shutdown();
+  }
+  
+  /**
+   * Tests replication in DFS.
+   */
+  public void runReplication(boolean simulated) throws IOException {
+    Configuration conf = new Configuration();
+    conf.setBoolean("dfs.replication.considerLoad", false);
+    if (simulated) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, racks);
+    cluster.waitActive();
+    
+    InetSocketAddress addr = new InetSocketAddress("localhost",
+                                                   cluster.getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+    
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+    assertEquals("Number of Datanodes ", numDatanodes, info.length);
+    FileSystem fileSys = cluster.getFileSystem();
+    try {
+      Path file1 = new Path("/smallblocktest.dat");
+      writeFile(fileSys, file1, 3);
+      checkFile(fileSys, file1, 3);
+      cleanupFile(fileSys, file1);
+      writeFile(fileSys, file1, 10);
+      checkFile(fileSys, file1, 10);
+      cleanupFile(fileSys, file1);
+      writeFile(fileSys, file1, 4);
+      checkFile(fileSys, file1, 4);
+      cleanupFile(fileSys, file1);
+      writeFile(fileSys, file1, 1);
+      checkFile(fileSys, file1, 1);
+      cleanupFile(fileSys, file1);
+      writeFile(fileSys, file1, 2);
+      checkFile(fileSys, file1, 2);
+      cleanupFile(fileSys, file1);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
+
+  public void testReplicationSimulatedStorage() throws IOException {
+    runReplication(true);
+  }
+  
+  
+  public void testReplication() throws IOException {
+    runReplication(false);
+  }
+  
+  // Waits for all of the blocks to have expected replication
+  private void waitForBlockReplication(String filename, 
+                                       ClientProtocol namenode,
+                                       int expected, long maxWaitSec) 
+                                       throws IOException {
+    long start = System.currentTimeMillis();
+    
+    //wait for all the blocks to be replicated;
+    LOG.info("Checking for block replication for " + filename);
+    int iters = 0;
+    while (true) {
+      boolean replOk = true;
+      LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, 
+                                                        Long.MAX_VALUE);
+      
+      for (Iterator<LocatedBlock> iter = blocks.getLocatedBlocks().iterator();
+           iter.hasNext();) {
+        LocatedBlock block = iter.next();
+        int actual = block.getLocations().length;
+        if ( actual < expected ) {
+          LOG.info("Not enough replicas for " + block.getBlock() +
+                   " yet. Expecting " + expected + ", got " + actual + ".");
+          replOk = false;
+          break;
+        }
+      }
+      
+      if (replOk) {
+        return;
+      }
+      
+      iters++;
+      
+      if (maxWaitSec > 0 && 
+          (System.currentTimeMillis() - start) > (maxWaitSec * 1000)) {
+        throw new IOException("Timedout while waiting for all blocks to " +
+                              " be replicated for " + filename);
+      }
+      
+      try {
+        Thread.sleep(500);
+      } catch (InterruptedException ignored) {}
+    }
+  }
+  
+  /* This test makes sure that NameNode retries all the available blocks 
+   * for under replicated blocks. 
+   * 
+   * It creates a file with one block and replication of 4. It deletes one of
+   * the replicas and corrupts two others. The expected behaviour is that the
+   * missing replicas are re-replicated from the one valid source.
+   */
+  public void testPendingReplicationRetry() throws IOException {
+    
+    MiniDFSCluster cluster = null;
+    int numDataNodes = 4;
+    String testFile = "/replication-test-file";
+    Path testPath = new Path(testFile);
+    
+    byte buffer[] = new byte[1024];
+    for (int i=0; i<buffer.length; i++) {
+      buffer[i] = '1';
+    }
+    
+    try {
+      Configuration conf = new Configuration();
+      conf.set("dfs.replication", Integer.toString(numDataNodes));
+      //first time format
+      cluster = new MiniDFSCluster(0, conf, numDataNodes, true,
+                                   true, null, null);
+      cluster.waitActive();
+      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                                            cluster.getNameNodePort()),
+                                            conf);
+      
+      OutputStream out = cluster.getFileSystem().create(testPath);
+      out.write(buffer);
+      out.close();
+      
+      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+
+      // get first block of the file.
+      String block = dfsClient.namenode.
+                       getBlockLocations(testFile, 0, Long.MAX_VALUE).
+                       get(0).getBlock().getBlockName();
+      
+      cluster.shutdown();
+      cluster = null;
+      
+      //Now mess up some of the replicas.
+      //Delete the first and corrupt the next two.
+      File baseDir = new File(System.getProperty("test.build.data"), 
+                                                 "dfs/data");
+      for (int i=0; i<25; i++) {
+        buffer[i] = '0';
+      }
+      
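+      // Each datanode in the MiniDFSCluster keeps its replicas under
+      // <test.build.data>/dfs/data/data<i>/current; scan those directories
+      // to find the on-disk copies of the block.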
+      int fileCount = 0;
+      for (int i=0; i<6; i++) {
+        File blockFile = new File(baseDir, "data" + (i+1) + "/current/" + block);
+        LOG.info("Checking for file " + blockFile);
+        
+        if (blockFile.exists()) {
+          if (fileCount == 0) {
+            LOG.info("Deleting file " + blockFile);
+            assertTrue(blockFile.delete());
+          } else {
+            // corrupt it.
+            LOG.info("Corrupting file " + blockFile);
+            long len = blockFile.length();
+            assertTrue(len > 50);
+            RandomAccessFile blockOut = new RandomAccessFile(blockFile, "rw");
+            try {
+              blockOut.seek(len/3);
+              blockOut.write(buffer, 0, 25);
+            } finally {
+              blockOut.close();
+            }
+          }
+          fileCount++;
+        }
+      }
+      assertEquals(3, fileCount);
+      
+      /* Start the MiniDFSCluster with more datanodes since once a writeBlock
+       * to a datanode fails, the same block cannot be written to it
+       * immediately. In our case some replication attempts will fail.
+       */
+      
+      LOG.info("Restarting minicluster after deleting a replica and corrupting 2 crcs");
+      conf = new Configuration();
+      conf.set("dfs.replication", Integer.toString(numDataNodes));
+      conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
+      conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
+      conf.set("dfs.safemode.threshold.pct", "0.75f"); // only 3 copies exist
+      
+      cluster = new MiniDFSCluster(0, conf, numDataNodes*2, false,
+                                   true, null, null);
+      cluster.waitActive();
+      
+      dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                                  cluster.getNameNodePort()),
+                                  conf);
+      
+      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+      
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }  
+  }
+  
+  /**
+   * Test if replication can detect on-disk block replicas with mismatched lengths
+   * @throws Exception
+   */
+  public void testReplicateLenMismatchedBlock() throws Exception {
+    MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 2, true, null);
+    try {
+      cluster.waitActive();
+      // test truncated block
+      changeBlockLen(cluster, -1);
+      // test extended block
+      changeBlockLen(cluster, 1);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  private void changeBlockLen(MiniDFSCluster cluster, 
+      int lenDelta) throws IOException, InterruptedException {
+    final Path fileName = new Path("/file1");
+    final short REPLICATION_FACTOR = (short)1;
+    final FileSystem fs = cluster.getFileSystem();
+    final int fileLen = fs.getConf().getInt("io.bytes.per.checksum", 512);
+    DFSTestUtil.createFile(fs, fileName, fileLen, REPLICATION_FACTOR, 0);
+    DFSTestUtil.waitReplication(fs, fileName, REPLICATION_FACTOR);
+
+    String block = DFSTestUtil.getFirstBlock(fs, fileName).getBlockName();
+
+    // Change the length of a replica
+    for (int i=0; i<cluster.getDataNodes().size(); i++) {
+      if (TestDatanodeBlockScanner.changeReplicaLength(block, i, lenDelta)) {
+        break;
+      }
+    }
+
+    // increase the file's replication factor
+    fs.setReplication(fileName, (short)(REPLICATION_FACTOR+1));
+
+    // block replication triggers corrupt block detection
+    DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", 
+        cluster.getNameNodePort()), fs.getConf());
+    LocatedBlocks blocks = dfsClient.namenode.getBlockLocations(
+        fileName.toString(), 0, fileLen);
+    if (lenDelta < 0) { // replica truncated
+      while (!blocks.get(0).isCorrupt() ||
+             REPLICATION_FACTOR != blocks.get(0).getLocations().length) {
+        Thread.sleep(100);
+        blocks = dfsClient.namenode.getBlockLocations(
+            fileName.toString(), 0, fileLen);
+      }
+    } else { // no corruption detected; block replicated
+      while (REPLICATION_FACTOR+1 != blocks.get(0).getLocations().length) {
+        Thread.sleep(100);
+        blocks = dfsClient.namenode.getBlockLocations(
+            fileName.toString(), 0, fileLen);
+      }
+    }
+    fs.delete(fileName, true);
+  }
+}

+ 81 - 0
src/test/org/apache/hadoop/hdfs/TestRestartDFS.java

@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * A JUnit test for checking if restarting DFS preserves integrity.
+ */
+public class TestRestartDFS extends TestCase {
+  /** check if DFS remains in proper condition after a restart */
+  public void testRestartDFS() throws Exception {
+    final Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    DFSTestUtil files = new DFSTestUtil("TestRestartDFS", 20, 3, 8*1024);
+
+    final String dir = "/srcdat";
+    final Path rootpath = new Path("/");
+    final Path dirpath = new Path(dir);
+
+    long rootmtime;
+    FileStatus rootstatus;
+    FileStatus dirstatus;
+
+    try {
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+      FileSystem fs = cluster.getFileSystem();
+      files.createFiles(fs, dir);
+
+      rootmtime = fs.getFileStatus(rootpath).getModificationTime();
+      rootstatus = fs.getFileStatus(dirpath);
+      dirstatus = fs.getFileStatus(dirpath);
+
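+      // Change the owner of the root and the group of /srcdat before the
+      // restart so that the checks below can verify the changes survive it.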
+      fs.setOwner(rootpath, rootstatus.getOwner() + "_XXX", null);
+      fs.setOwner(dirpath, null, dirstatus.getGroup() + "_XXX");
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+    try {
+      // Here we restart the MiniDFScluster without formatting namenode
+      cluster = new MiniDFSCluster(conf, 4, false, null);
+      FileSystem fs = cluster.getFileSystem();
+      assertTrue("Filesystem corrupted after restart.",
+                 files.checkFiles(fs, dir));
+
+      final FileStatus newrootstatus = fs.getFileStatus(rootpath);
+      assertEquals(rootmtime, newrootstatus.getModificationTime());
+      assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
+      assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
+
+      final FileStatus newdirstatus = fs.getFileStatus(dirpath);
+      assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
+      assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
+
+      files.cleanup(fs, dir);
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+}

+ 111 - 0
src/test/org/apache/hadoop/hdfs/TestSafeMode.java

@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+
+import junit.framework.TestCase;
+
+/**
+ * Tests to verify safe mode correctness.
+ */
+public class TestSafeMode extends TestCase {
+  
+  static Log LOG = LogFactory.getLog(TestSafeMode.class);
+
+  /**
+   * This test verifies that if SafeMode is manually entered, name-node does not
+   * come out of safe mode even after the startup safe mode conditions are met.
+   * <ol>
+   * <li>Start cluster with 1 data-node.</li>
+   * <li>Create 2 files with replication 1.</li>
+   * <li>Re-start cluster with 0 data-nodes. 
+   * Name-node should stay in automatic safe-mode.</li>
+   * <li>Enter safe mode manually.</li>
+   * <li>Start the data-node.</li>
+   * <li>Wait longer than <tt>dfs.safemode.extension</tt> and 
+   * verify that the name-node is still in safe mode.</li>
+   * </ol>
+   *  
+   * @throws IOException
+   */
+  public void testManualSafeMode() throws IOException {
+    MiniDFSCluster cluster = null;
+    DistributedFileSystem fs = null;
+    try {
+      Configuration conf = new Configuration();
+      // disable safemode extension to make the test run faster.
+      conf.set("dfs.safemode.extension", "1");
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster.waitActive();
+      
+      fs = (DistributedFileSystem)cluster.getFileSystem();
+      Path file1 = new Path("/tmp/testManualSafeMode/file1");
+      Path file2 = new Path("/tmp/testManualSafeMode/file2");
+      
+      LOG.info("Created file1 and file2.");
+      
+      // create two files with one block each.
+      DFSTestUtil.createFile(fs, file1, 1000, (short)1, 0);
+      DFSTestUtil.createFile(fs, file2, 2000, (short)1, 0);
+      fs.close();
+      cluster.shutdown();
+      
+      // now bring up just the NameNode.
+      cluster = new MiniDFSCluster(conf, 0, false, null);
+      cluster.waitActive();
+      fs = (DistributedFileSystem)cluster.getFileSystem();
+      
+      LOG.info("Restarted cluster with just the NameNode");
+      
+      assertTrue("No datanode is started. Should be in SafeMode", 
+                 fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+      
+      // manually set safemode.
+      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      
+      // now bring up the datanode and wait for it to be active.
+      cluster.startDataNodes(conf, 1, true, null, null);
+      cluster.waitActive();
+      
+      LOG.info("Datanode is started.");
+
+      // wait longer than dfs.safemode.extension
+      try {
+        Thread.sleep(2000);
+      } catch (InterruptedException ignored) {}
+      
+      assertTrue("should still be in SafeMode",
+          fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+      
+      fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+      assertFalse("should not be in SafeMode",
+          fs.setSafeMode(SafeModeAction.SAFEMODE_GET));
+    } finally {
+      if(fs != null) fs.close();
+      if(cluster!= null) cluster.shutdown();
+    }
+  }
+}

+ 156 - 0
src/test/org/apache/hadoop/hdfs/TestSeekBug.java

@@ -0,0 +1,156 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ChecksumFileSystem;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * This class tests for the presence of the seek bug described
+ * in HADOOP-508.
+ */
+public class TestSeekBug extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int ONEMB = 1 << 20;
+  
+  private void writeFile(FileSystem fileSys, Path name) throws IOException {
+    // create and write a file that contains 1MB
+    DataOutputStream stm = fileSys.create(name);
+    byte[] buffer = new byte[ONEMB];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+  
+  private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
+    for (int idx = 0; idx < actual.length; idx++) {
+      this.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+                        expected[from+idx]+" actual "+actual[idx],
+                        actual[idx], expected[from+idx]);
+      actual[idx] = 0;
+    }
+  }
+  
+  private void seekReadFile(FileSystem fileSys, Path name) throws IOException {
+    FSDataInputStream stm = fileSys.open(name, 4096);
+    byte[] expected = new byte[ONEMB];
+    Random rand = new Random(seed);
+    rand.nextBytes(expected);
+    
+    // First read 128 bytes to set count in BufferedInputStream
+    byte[] actual = new byte[128];
+    stm.read(actual, 0, actual.length);
+    // Now read a byte array that is bigger than the internal buffer
+    actual = new byte[100000];
+    stm.read(actual, 0, actual.length);
+    checkAndEraseData(actual, 128, expected, "First Read Test");
+    // now do a small seek, within the range that is already read
+    stm.seek(96036); // 4 byte seek
+    actual = new byte[128];
+    stm.read(actual, 0, actual.length);
+    checkAndEraseData(actual, 96036, expected, "Seek Bug");
+    // all done
+    stm.close();
+  }
+
+  /*
+   * Read some data, skip a few bytes and read more. HADOOP-922.
+   */
+  private void smallReadSeek(FileSystem fileSys, Path name) throws IOException {
+    if (fileSys instanceof ChecksumFileSystem) {
+      fileSys = ((ChecksumFileSystem)fileSys).getRawFileSystem();
+    }
+    // Make the buffer size small to trigger code for HADOOP-922
+    FSDataInputStream stmRaw = fileSys.open(name, 1);
+    byte[] expected = new byte[ONEMB];
+    Random rand = new Random(seed);
+    rand.nextBytes(expected);
+    
+    // Issue a simple read first.
+    byte[] actual = new byte[128];
+    stmRaw.seek(100000);
+    stmRaw.read(actual, 0, actual.length);
+    checkAndEraseData(actual, 100000, expected, "First Small Read Test");
+
+    // now do a small seek of 4 bytes, within the same block.
+    int newpos1 = 100000 + 128 + 4;
+    stmRaw.seek(newpos1);
+    stmRaw.read(actual, 0, actual.length);
+    checkAndEraseData(actual, newpos1, expected, "Small Seek Bug 1");
+
+    // seek another 256 bytes this time
+    int newpos2 = newpos1 + 256;
+    stmRaw.seek(newpos2);
+    stmRaw.read(actual, 0, actual.length);
+    checkAndEraseData(actual, newpos2, expected, "Small Seek Bug 2");
+
+    // all done
+    stmRaw.close();
+  }
+  
+  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+  
+  /**
+   * Test if the seek bug exists in FSDataInputStream in DFS.
+   */
+  public void testSeekBugDFS() throws IOException {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fileSys = cluster.getFileSystem();
+    try {
+      Path file1 = new Path("seektest.dat");
+      writeFile(fileSys, file1);
+      seekReadFile(fileSys, file1);
+      smallReadSeek(fileSys, file1);
+      cleanupFile(fileSys, file1);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+  
+  /**
+   * Tests if the seek bug exists in FSDataInputStream in LocalFS.
+   */
+  public void testSeekBugLocalFS() throws IOException {
+    Configuration conf = new Configuration();
+    FileSystem fileSys = FileSystem.getLocal(conf);
+    try {
+      Path file1 = new Path("build/test/data", "seektest.dat");
+      writeFile(fileSys, file1);
+      seekReadFile(fileSys, file1);
+      cleanupFile(fileSys, file1);
+    } finally {
+      fileSys.close();
+    }
+  }
+}

+ 189 - 0
src/test/org/apache/hadoop/hdfs/TestSetTimes.java

@@ -0,0 +1,189 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Random;
+import java.net.*;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
+import java.text.SimpleDateFormat;
+import java.util.Date;
+
+/**
+ * This class tests access and modification times on files.
+ */
+public class TestSetTimes extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 8192;
+  static final int fileSize = 16384;
+  static final int numDatanodes = 1;
+
+  static final SimpleDateFormat dateForm = new SimpleDateFormat("yyyy-MM-dd HH:mm");
+
+  Random myrand = new Random();
+  Path hostsFile;
+  Path excludeFile;
+
+  private FSDataOutputStream writeFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    FSDataOutputStream stm = fileSys.create(name, true, 
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)blockSize);
+    byte[] buffer = new byte[fileSize];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    return stm;
+  }
+  
+  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+
+  private void printDatanodeReport(DatanodeInfo[] info) {
+    System.out.println("-------------------------------------------------");
+    for (int i = 0; i < info.length; i++) {
+      System.out.println(info[i].getDatanodeReport());
+      System.out.println();
+    }
+  }
+
+  /**
+   * Tests mod & access time in DFS.
+   */
+  public void testTimes() throws IOException {
+    Configuration conf = new Configuration();
+    final int MAX_IDLE_TIME = 2000; // 2s
+    conf.setInt("ipc.client.connection.maxidletime", MAX_IDLE_TIME);
+    conf.setInt("heartbeat.recheck.interval", 1000);
+    conf.setInt("dfs.heartbeat.interval", 1);
+
+
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    cluster.waitActive();
+    final int nnport = cluster.getNameNodePort();
+    InetSocketAddress addr = new InetSocketAddress("localhost", 
+                                                   cluster.getNameNodePort());
+    DFSClient client = new DFSClient(addr, conf);
+    DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
+    assertEquals("Number of Datanodes ", numDatanodes, info.length);
+    FileSystem fileSys = cluster.getFileSystem();
+    int replicas = 1;
+    assertTrue(fileSys instanceof DistributedFileSystem);
+
+    try {
+      //
+      // create file and record atime/mtime
+      //
+      System.out.println("Creating testdir1 and testdir1/test1.dat.");
+      Path dir1 = new Path("testdir1");
+      Path file1 = new Path(dir1, "test1.dat");
+      FSDataOutputStream stm = writeFile(fileSys, file1, replicas);
+      FileStatus stat = fileSys.getFileStatus(file1);
+      long atimeBeforeClose = stat.getAccessTime();
+      String adate = dateForm.format(new Date(atimeBeforeClose));
+      System.out.println("atime on " + file1 + " before close is " + 
+                         adate + " (" + atimeBeforeClose + ")");
+      assertTrue(atimeBeforeClose != 0);
+      stm.close();
+
+      stat = fileSys.getFileStatus(file1);
+      long atime1 = stat.getAccessTime();
+      long mtime1 = stat.getModificationTime();
+      adate = dateForm.format(new Date(atime1));
+      String mdate = dateForm.format(new Date(mtime1));
+      System.out.println("atime on " + file1 + " is " + adate + 
+                         " (" + atime1 + ")");
+      System.out.println("mtime on " + file1 + " is " + mdate + 
+                         " (" + mtime1 + ")");
+      assertTrue(atime1 != 0);
+
+      //
+      // record dir times
+      //
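+      // the directory's access time is expected to remain 0, since HDFS does
+      // not maintain access times on directories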
+      stat = fileSys.getFileStatus(dir1);
+      long mdir1 = stat.getAccessTime();
+      assertTrue(mdir1 == 0);
+
+      // set the access time to be one day in the past
+      long atime2 = atime1 - (24L * 3600L * 1000L);
+      fileSys.setTimes(file1, -1, atime2);
+
+      // check new access time on file
+      stat = fileSys.getFileStatus(file1);
+      long atime3 = stat.getAccessTime();
+      String adate3 = dateForm.format(new Date(atime3));
+      System.out.println("new atime on " + file1 + " is " + 
+                         adate3 + " (" + atime3 + ")");
+      assertTrue(atime2 == atime3);
+      assertTrue(mtime1 == stat.getModificationTime());
+
+      // set the modification time to be 1 hour in the past
+      long mtime2 = mtime1 - (3600L * 1000L);
+      fileSys.setTimes(file1, mtime2, -1);
+
+      // check new modification time on file
+      stat = fileSys.getFileStatus(file1);
+      long mtime3 = stat.getModificationTime();
+      String mdate3 = dateForm.format(new Date(mtime3));
+      System.out.println("new mtime on " + file1 + " is " + 
+                         mdate3 + " (" + mtime3 + ")");
+      assertTrue(atime2 == stat.getAccessTime());
+      assertTrue(mtime2 == mtime3);
+
+      // shutdown cluster and restart
+      cluster.shutdown();
+      try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
+      cluster = new MiniDFSCluster(nnport, conf, 1, false, true,
+                                   null, null, null);
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
+
+      // verify that access times and modification times persist after a
+      // cluster restart.
+      System.out.println("Verifying times after cluster restart");
+      stat = fileSys.getFileStatus(file1);
+      assertTrue(atime2 == stat.getAccessTime());
+      assertTrue(mtime3 == stat.getModificationTime());
+    
+      cleanupFile(fileSys, file1);
+      cleanupFile(fileSys, dir1);
+    } catch (IOException e) {
+      info = client.datanodeReport(DatanodeReportType.ALL);
+      printDatanodeReport(info);
+      throw e;
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    new TestSetTimes().testTimes();
+  }
+}

+ 28 - 0
src/test/org/apache/hadoop/hdfs/TestSetrepDecreasing.java

@@ -0,0 +1,28 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+public class TestSetrepDecreasing extends TestCase {
+  public void testSetrepDecreasing() throws IOException {
+    TestSetrepIncreasing.setrep(5, 3, false);
+  }
+}

+ 77 - 0
src/test/org/apache/hadoop/hdfs/TestSetrepIncreasing.java

@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+
+public class TestSetrepIncreasing extends TestCase {
+  static void setrep(int fromREP, int toREP, boolean simulatedStorage) throws IOException {
+    Configuration conf = new Configuration();
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    conf.set("dfs.replication", "" + fromREP);
+    conf.setLong("dfs.blockreport.intervalMsec", 1000L);
+    conf.set("dfs.replication.pending.timeout.sec", Integer.toString(2));
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 10, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(), fs instanceof DistributedFileSystem);
+
+    try {
+      Path root = TestDFSShell.mkdir(fs, 
+          new Path("/test/setrep" + fromREP + "-" + toREP));
+      Path f = TestDFSShell.writeFile(fs, new Path(root, "foo"));
+      
+      // Verify setrep for changing replication
+      {
+        String[] args = {"-setrep", "-w", "" + toREP, "" + f};
+        FsShell shell = new FsShell();
+        shell.setConf(conf);
+        try {
+          assertEquals(0, shell.run(args));
+        } catch (Exception e) {
+          assertTrue("-setrep " + e, false);
+        }
+      }
+
+      //get fs again since the old one may be closed
+      fs = cluster.getFileSystem();
+      FileStatus file = fs.getFileStatus(f);
+      long len = file.getLen();
+      for(BlockLocation locations : fs.getFileBlockLocations(file, 0, len)) {
+        assertTrue(locations.getHosts().length == toREP);
+      }
+      TestDFSShell.show("done setrep waiting: " + root);
+    } finally {
+      try {fs.close();} catch (Exception e) {}
+      cluster.shutdown();
+    }
+  }
+
+  public void testSetrepIncreasing() throws IOException {
+    setrep(3, 7, false);
+  }
+  public void testSetrepIncreasingSimulatedStorage() throws IOException {
+    setrep(3, 7, true);
+  }
+}

+ 115 - 0
src/test/org/apache/hadoop/hdfs/TestSmallBlock.java

@@ -0,0 +1,115 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Random;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+
+/**
+ * This class tests the creation of files with block-size
+ * smaller than the default buffer size of 4K.
+ */
+public class TestSmallBlock extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 1;
+  static final int fileSize = 20;
+  boolean simulatedStorage = false;
+
+  private void writeFile(FileSystem fileSys, Path name) throws IOException {
+    // create and write a file that contains three blocks of data
+    FSDataOutputStream stm = fileSys.create(name, true, 
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)1, (long)blockSize);
+    byte[] buffer = new byte[fileSize];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+  
+  private void checkAndEraseData(byte[] actual, int from, byte[] expected, String message) {
+    for (int idx = 0; idx < actual.length; idx++) {
+      this.assertEquals(message+" byte "+(from+idx)+" differs. expected "+
+                        expected[from+idx]+" actual "+actual[idx],
+                        actual[idx], expected[from+idx]);
+      actual[idx] = 0;
+    }
+  }
+  
+  private void checkFile(FileSystem fileSys, Path name) throws IOException {
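+    // With a 1-byte block size the 20-byte file occupies one block per byte,
+    // so the number of block locations should equal fileSize.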
+    BlockLocation[] locations = fileSys.getFileBlockLocations(
+        fileSys.getFileStatus(name), 0, fileSize);
+    assertEquals("Number of blocks", fileSize, locations.length);
+    FSDataInputStream stm = fileSys.open(name);
+    byte[] expected = new byte[fileSize];
+    if (simulatedStorage) {
+      for (int i = 0; i < expected.length; ++i) {  
+        expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
+      }
+    } else {
+      Random rand = new Random(seed);
+      rand.nextBytes(expected);
+    }
+    // do a sanity check. Read the file
+    byte[] actual = new byte[fileSize];
+    stm.readFully(0, actual);
+    checkAndEraseData(actual, 0, expected, "Read Sanity Test");
+    stm.close();
+  }
+  
+  private void cleanupFile(FileSystem fileSys, Path name) throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+  
+  /**
+   * Tests small block size in DFS.
+   */
+  public void testSmallBlock() throws IOException {
+    Configuration conf = new Configuration();
+    if (simulatedStorage) {
+      conf.setBoolean("dfs.datanode.simulateddatastorage", true);
+    }
+    conf.set("io.bytes.per.checksum", "1");
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fileSys = cluster.getFileSystem();
+    try {
+      Path file1 = new Path("smallblocktest.dat");
+      writeFile(fileSys, file1);
+      checkFile(fileSys, file1);
+      cleanupFile(fileSys, file1);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+  public void testSmallBlockSimulatedStorage() throws IOException {
+    simulatedStorage = true;
+    testSmallBlock();
+    simulatedStorage = false;
+  }
+}

+ 390 - 0
src/test/org/apache/hadoop/hdfs/UpgradeUtilities.java

@@ -0,0 +1,390 @@
+/*
+ * UpgradeUtilities.java
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.util.Arrays;
+import java.util.Random;
+import java.util.zip.CRC32;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.NAME_NODE;
+import static org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType.DATA_NODE;
+
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.namenode.FSImage;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+
+/**
+ * This class defines a number of static helper methods used by the
+ * DFS Upgrade unit tests.  By default, a singleton master populated storage
+ * directory is created for a Namenode (contains edits, fsimage,
+ * version, and time files) and a Datanode (contains version and
+ * block files).  The master directories are lazily created.  They are then
+ * copied by the createStorageDirs() method to create new storage
+ * directories of the appropriate type (Namenode or Datanode).
+ */
+public class UpgradeUtilities {
+
+  // Root scratch directory on local filesystem 
+  private static File TEST_ROOT_DIR = new File(
+      System.getProperty("test.build.data","/tmp").replace(' ', '+'));
+  // The singleton master storage directory for Namenode
+  private static File namenodeStorage = new File(TEST_ROOT_DIR, "namenodeMaster");
+  // A checksum of the contents in namenodeStorage directory
+  private static long namenodeStorageChecksum;
+  // The namespaceId of the namenodeStorage directory
+  private static int namenodeStorageNamespaceID;
+  // The fsscTime of the namenodeStorage directory
+  private static long namenodeStorageFsscTime;
+  // The singleton master storage directory for Datanode
+  private static File datanodeStorage = new File(TEST_ROOT_DIR, "datanodeMaster");
+  // A checksum of the contents in datanodeStorage directory
+  private static long datanodeStorageChecksum;
+  
+  /**
+   * Initialize the data structures used by this class.  
+   * IMPORTANT NOTE: This method must be called once before calling 
+   *                 any other public method on this class.  
+   * <p>
+   * Creates a singleton master populated storage
+   * directory for a Namenode (contains edits, fsimage,
+   * version, and time files) and a Datanode (contains version and
+   * block files).  This can be a lengthy operation.
+   */
+  public static void initialize() throws Exception {
+    createEmptyDirs(new String[] {TEST_ROOT_DIR.toString()});
+    Configuration config = new Configuration();
+    config.set("dfs.name.dir", namenodeStorage.toString());
+    config.set("dfs.data.dir", datanodeStorage.toString());
+    MiniDFSCluster cluster = null;
+    try {
+      // format data-node
+      createEmptyDirs(new String[] {datanodeStorage.toString()});
+      
+      // format and start NameNode and start DataNode
+      NameNode.format(config); 
+      cluster = new MiniDFSCluster(config, 1, StartupOption.REGULAR);
+        
+      NameNode namenode = cluster.getNameNode();
+      namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
+      namenodeStorageFsscTime = namenode.versionRequest().getCTime();
+      
+      FileSystem fs = FileSystem.get(config);
+      Path baseDir = new Path("/TestUpgrade");
+      fs.mkdirs(baseDir);
+      
+      // write some files
+      int bufferSize = 4096;
+      byte[] buffer = new byte[bufferSize];
+      for(int i=0; i < bufferSize; i++)
+        buffer[i] = (byte)('0' + i % 50);
+      writeFile(fs, new Path(baseDir, "file1"), buffer, bufferSize);
+      writeFile(fs, new Path(baseDir, "file2"), buffer, bufferSize);
+      
+      // save image
+      namenode.getFSImage().saveFSImage();
+      
+      // write more files
+      writeFile(fs, new Path(baseDir, "file3"), buffer, bufferSize);
+      writeFile(fs, new Path(baseDir, "file4"), buffer, bufferSize);
+    } finally {
+      // shutdown
+      if (cluster != null) cluster.shutdown();
+      FileUtil.fullyDelete(new File(namenodeStorage,"in_use.lock"));
+      FileUtil.fullyDelete(new File(datanodeStorage,"in_use.lock"));
+    }
+    namenodeStorageChecksum = checksumContents(
+                                               NAME_NODE, new File(namenodeStorage,"current"));
+    datanodeStorageChecksum = checksumContents(
+                                               DATA_NODE, new File(datanodeStorage,"current"));
+  }
+  
+  // Private helper method that writes a file to the given file system.
+  private static void writeFile(FileSystem fs, Path path, byte[] buffer,
+                                int bufferSize) throws IOException 
+  {
+    OutputStream out;
+    out = fs.create(path, true, bufferSize, (short) 1, 1024);
+    out.write(buffer, 0, bufferSize);
+    out.close();
+  }
+  
+  /**
+   * Initialize dfs.name.dir and dfs.data.dir with the specified number of
+   * directory entries. Also initialize dfs.blockreport.intervalMsec.
+   */
+  public static Configuration initializeStorageStateConf(int numDirs,
+                                                         Configuration conf) {
+    StringBuffer nameNodeDirs =
+      new StringBuffer(new File(TEST_ROOT_DIR, "name1").toString());
+    StringBuffer dataNodeDirs =
+      new StringBuffer(new File(TEST_ROOT_DIR, "data1").toString());
+    for (int i = 2; i <= numDirs; i++) {
+      nameNodeDirs.append("," + new File(TEST_ROOT_DIR, "name"+i));
+      dataNodeDirs.append("," + new File(TEST_ROOT_DIR, "data"+i));
+    }
+    if (conf == null) {
+      conf = new Configuration();
+    }
+    conf.set("dfs.name.dir", nameNodeDirs.toString());
+    conf.set("dfs.data.dir", dataNodeDirs.toString());
+    conf.setInt("dfs.blockreport.intervalMsec", 10000);
+    return conf;
+  }
+  
+  /**
+   * Create empty directories.  If a specified directory already exists
+   * then it is first removed.
+   */
+  public static void createEmptyDirs(String[] dirs) throws IOException {
+    for (String d : dirs) {
+      File dir = new File(d);
+      if (dir.exists()) {
+        FileUtil.fullyDelete(dir);
+      }
+      dir.mkdirs();
+    }
+  }
+  
+  /**
+   * Return the checksum for the singleton master storage directory
+   * of the given node type.
+   */
+  public static long checksumMasterContents(NodeType nodeType) throws IOException {
+    if (nodeType == NAME_NODE) {
+      return namenodeStorageChecksum;
+    } else {
+      return datanodeStorageChecksum;
+    }
+  }
+  
+  /**
+   * Compute the checksum of all the files in the specified directory.
+   * The contents of subdirectories are not included. This method provides
+   * an easy way to ensure equality between the contents of two directories.
+   *
+   * @param nodeType if DATA_NODE then any file named "VERSION" is ignored.
+   *    This is because this file is changed every time
+   *    the Datanode is started.
+   * @param dir must be a directory. Subdirectories are ignored.
+   *
+   * @throws IllegalArgumentException if specified directory is not a directory
+   * @throws IOException if an IOException occurs while reading the files
+   * @return the computed checksum value
+   */
+  public static long checksumContents(NodeType nodeType, File dir) throws IOException {
+    if (!dir.isDirectory()) {
+      throw new IllegalArgumentException(
+                                         "Given argument is not a directory:" + dir);
+    }
+    File[] list = dir.listFiles();
+    Arrays.sort(list);
+    CRC32 checksum = new CRC32();
+    for (int i = 0; i < list.length; i++) {
+      if (!list[i].isFile()) {
+        continue;
+      }
+      // skip VERSION file for DataNodes
+      if (nodeType == DATA_NODE && list[i].getName().equals("VERSION")) {
+        continue; 
+      }
+      FileInputStream fis = null;
+      try {
+        fis = new FileInputStream(list[i]);
+        byte[] buffer = new byte[1024];
+        int bytesRead;
+        while ((bytesRead = fis.read(buffer)) != -1) {
+          checksum.update(buffer, 0, bytesRead);
+        }
+      } finally {
+        if(fis != null) {
+          fis.close();
+        }
+      }
+    }
+    return checksum.getValue();
+  }
+  
+  /**
+   * Simulate the <code>dfs.name.dir</code> or <code>dfs.data.dir</code>
+   * of a populated DFS filesystem.
+   *
+   * This method creates and populates the directory specified by
+   *  <code>parent/dirName</code>, for each parent directory.
+   * The contents of the new directories will be
+   * appropriate for the given node type.  If the directory does not
+   * exist, it will be created.  If the directory already exists, it
+   * will first be deleted.
+   *
+   * By default, a singleton master populated storage
+   * directory is created for a Namenode (contains edits, fsimage,
+   * version, and time files) and a Datanode (contains version and
+   * block files).  These directories are then
+   * copied by this method to create new storage
+   * directories of the appropriate type (Namenode or Datanode).
+   *
+   * @return the array of created directories
+   */
+  public static File[] createStorageDirs(NodeType nodeType, String[] parents, String dirName) throws Exception {
+    File[] retVal = new File[parents.length];
+    for (int i = 0; i < parents.length; i++) {
+      File newDir = new File(parents[i], dirName);
+      createEmptyDirs(new String[] {newDir.toString()});
+      LocalFileSystem localFS = FileSystem.getLocal(new Configuration());
+      switch (nodeType) {
+      case NAME_NODE:
+        localFS.copyToLocalFile(new Path(namenodeStorage.toString(), "current"),
+                                new Path(newDir.toString()),
+                                false);
+        Path newImgDir = new Path(newDir.getParent(), "image");
+        if (!localFS.exists(newImgDir))
+          localFS.copyToLocalFile(
+              new Path(namenodeStorage.toString(), "image"),
+              newImgDir,
+              false);
+        break;
+      case DATA_NODE:
+        localFS.copyToLocalFile(new Path(datanodeStorage.toString(), "current"),
+                                new Path(newDir.toString()),
+                                false);
+        Path newStorageFile = new Path(newDir.getParent(), "storage");
+        if (!localFS.exists(newStorageFile))
+          localFS.copyToLocalFile(
+              new Path(datanodeStorage.toString(), "storage"),
+              newStorageFile,
+              false);
+        break;
+      }
+      retVal[i] = newDir;
+    }
+    return retVal;
+  }
+  
+  /**
+   * Create a <code>version</code> file inside the specified parent
+   * directory.  If such a file already exists, it will be overwritten.
+   * The given version string will be written to the file as the layout
+   * version. None of the parameters may be null.
+   *
+   * @param version the layout version and other storage information to write
+   *
+   * @return the created version file
+   */
+  public static File[] createVersionFile(NodeType nodeType, File[] parent,
+                                         StorageInfo version) throws IOException 
+  {
+    Storage storage = null;
+    File[] versionFiles = new File[parent.length];
+    for (int i = 0; i < parent.length; i++) {
+      File versionFile = new File(parent[i], "VERSION");
+      FileUtil.fullyDelete(versionFile);
+      switch (nodeType) {
+      case NAME_NODE:
+        storage = new FSImage(version);
+        break;
+      case DATA_NODE:
+        storage = new DataStorage(version, "doNotCare");
+        break;
+      }
+      StorageDirectory sd = storage.new StorageDirectory(parent[i].getParentFile());
+      sd.write(versionFile);
+      versionFiles[i] = versionFile;
+    }
+    return versionFiles;
+  }
+  
+  /**
+   * Corrupt the specified file.  Some random bytes within the file
+   * will be changed to some random values.
+   *
+   * @throws IllegalArgumentException if the given file is not a file
+   * @throws IOException if an IOException occurs while reading or writing the file
+   */
+  public static void corruptFile(File file) throws IOException {
+    if (!file.isFile()) {
+      throw new IllegalArgumentException(
+                                         "Given argument is not a file:" + file);
+    }
+    RandomAccessFile raf = new RandomAccessFile(file,"rws");
+    Random random = new Random();
+    for (long i = 0; i < raf.length(); i++) {
+      raf.seek(i);
+      if (random.nextBoolean()) {
+        raf.writeByte(random.nextInt());
+      }
+    }
+    raf.close();
+  }
+  
+  /**
+   * Return the layout version inherent in the current version
+   * of the Namenode, whether it is running or not.
+   */
+  public static int getCurrentLayoutVersion() {
+    return FSConstants.LAYOUT_VERSION;
+  }
+  
+  /**
+   * Return the namespace ID inherent in the currently running
+   * Namenode.  If no Namenode is running, return the namespace ID of
+   * the master Namenode storage directory.
+   *
+   * The UpgradeUtilities.initialize() method must be called once before
+   * calling this method.
+   */
+  public static int getCurrentNamespaceID(MiniDFSCluster cluster) throws IOException {
+    if (cluster != null) {
+      return cluster.getNameNode().versionRequest().getNamespaceID();
+    }
+    return namenodeStorageNamespaceID;
+  }
+  
+  /**
+   * Return the File System State Creation Timestamp (FSSCTime) inherent
+   * in the currently running Namenode.  If no Namenode is running,
+   * return the FSSCTime of the master Namenode storage directory.
+   *
+   * The UpgradeUtilities.initialize() method must be called once before
+   * calling this method.
+   */
+  public static long getCurrentFsscTime(MiniDFSCluster cluster) throws IOException {
+    if (cluster != null) {
+      return cluster.getNameNode().versionRequest().getCTime();
+    }
+    return namenodeStorageFsscTime;
+  }
+}
+
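
As a usage note on the helpers above, a minimal hedged sketch of how checksumContents() is meant to be used inside a JUnit test to assert that two storage directories hold identical files; the paths are hypothetical, and NAME_NODE refers to the HdfsConstants.NodeType constant used throughout this class:

    // Hedged sketch, not part of this patch: compare two directories expected to match.
    File original = new File("/tmp/dfs/name1/current");   // hypothetical path
    File copy     = new File("/tmp/dfs/name2/current");   // hypothetical path
    assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, original),
                 UpgradeUtilities.checksumContents(NAME_NODE, copy));
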

二進制
src/test/org/apache/hadoop/hdfs/hadoop-14-dfs-dir.tgz


+ 67 - 0
src/test/org/apache/hadoop/hdfs/hadoop-dfs-dir.txt

@@ -0,0 +1,67 @@
+#
+# This is a readme for hadoop-version-dfs-dir.tgz and hadoop-dfs-dir.txt.
+#
+# See HADOOP-1629 for more info if needed.
+# These two files are used by unit test TestDFSUpgradeFromImage.java 
+# 
+# hadoop-14-dfs-dir.tgz : 
+# ---------------------
+# This file contains the HDFS directory structure for one namenode and 4 datanodes.
+# The structure is set up similarly to the structure used in MiniDFSCluster.
+# The directory was created with Hadoop-0.14.x.
+#
+# In the test, this directory is unpacked and MiniDFSCluster is run with 
+# "-upgrade" option. The test waits for the upgrade to complete 
+# (leave safe mode) and then all the files are read. The test checks that the
+# directory structure and file checksums exactly match the information
+# in this file.
+#
+# hadoop-dfs-dir.txt :
+# ---------------------
+# Along with this description, this file contains the expected files and 
+# checksums of the files in the upgraded DFS.
+# 
+# The original DFS directory was created with various types of files and with
+# some recoverable errors (i.e. corrupt or missing .crc files).
+#
+# A similar set of files exists in two different DFS directories. 
+# For example, "top-dir-1Mb-512" contains files created with dfs.block.size of 1Mb 
+# and io.bytes.per.checksum of 512.
+#
+# In the future, when Hadoop project no longer supports upgrade from
+# Hadoop-0.12, then a new DFS directory image must be created.
+#
+# To generate checksum info for new files :
+# ---------------------------------------
+# Uncomment the last comment (starts with "printChecksums") and run the 
+# test again. When the test sees this line, it prints the checksum
+# information that should replace the checksum information in 
+# this file. When run in this mode, the test will fail with a descriptive IOException.
+#
+# Next, extract the checksum info from the test log like this:
+#  sed -n 's/.*CRC info for reference file : //p' test-log.txt >> this_file
+# This will append a new list of files and checksums to this file.  Be sure to remove the existing checksum info.
+#
+# For your reference, the format of the checksum info below is "filename whitespace*\twhitespace* checksum\n"
+#
+# Uncomment the following line to produce checksum info for a new DFS image.
+#printChecksums
+
+/1kb-multiple-checksum-blocks-64-16 	 191893480
+/top-dir-120000-60/1Mb-file 	 4079112547
+/top-dir-120000-60/4k-file 	 3716287280
+/top-dir-120000-60/5Mb-file 	 2563834633
+/top-dir-120000-60/directory1/500thousand-file 	 3036538664
+/top-dir-120000-60/directory1/file-with-corrupt-crc 	 1984689737
+/top-dir-120000-60/directory1/file-with-no-crc 	 4004594475
+/top-dir-120000-60/directory1/zero1 	 0
+/top-dir-120000-60/zerolen 	 0
+/top-dir-1Mb-512/1Mb-file 	 4079112547
+/top-dir-1Mb-512/4k-file 	 3716287280
+/top-dir-1Mb-512/5Mb-file 	 2563834633
+/top-dir-1Mb-512/directory1/500thousand-file 	 3036538664
+/top-dir-1Mb-512/directory1/file-with-corrupt-crc 	 1984689737
+/top-dir-1Mb-512/directory1/file-with-no-crc 	 4004594475
+/top-dir-1Mb-512/directory1/zero1 	 0
+/top-dir-1Mb-512/zerolen 	 0
+overallCRC 	 1419480698

+ 299 - 0
src/test/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -0,0 +1,299 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.balancer;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.AccessTokenHandler;
+
+import junit.framework.TestCase;
+/**
+ * This class tests if a balancer schedules tasks correctly.
+ */
+public class TestBalancer extends TestCase {
+  final private static long CAPACITY = 500L;
+  final private static String RACK0 = "/rack0";
+  final private static String RACK1 = "/rack1";
+  final private static String RACK2 = "/rack2";
+  final static private String fileName = "/tmp.txt";
+  final static private Path filePath = new Path(fileName);
+  private MiniDFSCluster cluster;
+
+  ClientProtocol client;
+
+  static final int DEFAULT_BLOCK_SIZE = 10;
+  private Balancer balancer;
+  private Random r = new Random();
+
+  static {
+    Balancer.setBlockMoveWaitTime(1000L);
+  }
+
+  private void initConf(Configuration conf) {
+    conf.setBoolean(AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, false);
+    conf.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    conf.setInt("io.bytes.per.checksum", DEFAULT_BLOCK_SIZE);
+    conf.setLong("dfs.heartbeat.interval", 1L);
+    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    conf.setLong("dfs.balancer.movedWinWidth", 2000L);
+  }
+
+  /* create a file with a length of <code>fileLen</code> */
+  private void createFile(long fileLen, short replicationFactor)
+  throws IOException {
+    FileSystem fs = cluster.getFileSystem();
+    DFSTestUtil.createFile(fs, filePath, fileLen, 
+        replicationFactor, r.nextLong());
+    DFSTestUtil.waitReplication(fs, filePath, replicationFactor);
+  }
+
+
+  /* fill up a cluster with <code>numNodes</code> datanodes 
+   * whose used space is <code>size</code>
+   */
+  private Block[] generateBlocks(Configuration conf, long size, short numNodes) throws IOException {
+    cluster = new MiniDFSCluster( conf, numNodes, true, null);
+    try {
+      cluster.waitActive();
+      client = DFSClient.createNamenode(conf);
+
+      short replicationFactor = (short)(numNodes-1);
+      long fileLen = size/replicationFactor;
+      createFile(fileLen, replicationFactor);
+
+      List<LocatedBlock> locatedBlocks = client.
+      getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
+
+      int numOfBlocks = locatedBlocks.size();
+      Block[] blocks = new Block[numOfBlocks];
+      for(int i=0; i<numOfBlocks; i++) {
+        Block b = locatedBlocks.get(i).getBlock();
+        blocks[i] = new Block(b.getBlockId(), b.getNumBytes(), b.getGenerationStamp());
+      }
+
+      return blocks;
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /* Distribute all blocks according to the given distribution */
+  Block[][] distributeBlocks(Block[] blocks, short replicationFactor, 
+      final long[] distribution ) {
+    // make a copy
+    long[] usedSpace = new long[distribution.length];
+    System.arraycopy(distribution, 0, usedSpace, 0, distribution.length);
+
+    List<List<Block>> blockReports = 
+      new ArrayList<List<Block>>(usedSpace.length);
+    Block[][] results = new Block[usedSpace.length][];
+    for(int i=0; i<usedSpace.length; i++) {
+      blockReports.add(new ArrayList<Block>());
+    }
+    for(int i=0; i<blocks.length; i++) {
+      for(int j=0; j<replicationFactor; j++) {
+        boolean notChosen = true;
+        while(notChosen) {
+          int chosenIndex = r.nextInt(usedSpace.length);
+          if( usedSpace[chosenIndex]>0 ) {
+            notChosen = false;
+            blockReports.get(chosenIndex).add(blocks[i]);
+            usedSpace[chosenIndex] -= blocks[i].getNumBytes();
+          }
+        }
+      }
+    }
+    for(int i=0; i<usedSpace.length; i++) {
+      List<Block> nodeBlockList = blockReports.get(i);
+      results[i] = nodeBlockList.toArray(new Block[nodeBlockList.size()]);
+    }
+    return results;
+  }
+
+  /* First start a cluster and fill it up to a certain size,
+   * then redistribute the blocks according to the required distribution.
+   * Afterwards, run a balancer to balance the cluster.
+   */
+  private void testUnevenDistribution(Configuration conf,
+      long distribution[], long capacities[], String[] racks) throws Exception {
+    int numDatanodes = distribution.length;
+    if (capacities.length != numDatanodes || racks.length != numDatanodes) {
+      throw new IllegalArgumentException("Array length is not the same");
+    }
+
+    // calculate total space that need to be filled
+    long totalUsedSpace=0L;
+    for(int i=0; i<distribution.length; i++) {
+      totalUsedSpace += distribution[i];
+    }
+
+    // fill the cluster
+    Block[] blocks = generateBlocks(conf, totalUsedSpace, (short)numDatanodes);
+
+    // redistribute blocks
+    Block[][] blocksDN = distributeBlocks(
+        blocks, (short)(numDatanodes-1), distribution);
+
+    // restart the cluster: do NOT format the cluster
+    conf.set("dfs.safemode.threshold.pct", "0.0f"); 
+    cluster = new MiniDFSCluster(0, conf, numDatanodes,
+        false, true, null, racks, capacities);
+    cluster.waitActive();
+    client = DFSClient.createNamenode(conf);
+
+    cluster.injectBlocks(blocksDN);
+
+    long totalCapacity = 0L;
+    for(long capacity:capacities) {
+      totalCapacity += capacity;
+    }
+    runBalancer(conf, totalUsedSpace, totalCapacity);
+  }
+
+  /* wait for one heartbeat */
+  private void waitForHeartBeat( long expectedUsedSpace, long expectedTotalSpace )
+  throws IOException {
+    long[] status = client.getStats();
+    while(status[0] != expectedTotalSpace || status[1] != expectedUsedSpace ) {
+      try {
+        Thread.sleep(100L);
+      } catch(InterruptedException ignored) {
+      }
+      status = client.getStats();
+    }
+  }
+
+  /* This test starts a cluster with the given capacities and racks,
+   * fills it to be 30% full, then adds an empty node and starts balancing.
+   * @param newCapacity the new node's capacity
+   * @param newRack the new node's rack
+   */
+  private void test(Configuration conf, long[] capacities, String[] racks, 
+      long newCapacity, String newRack) throws Exception {
+    int numOfDatanodes = capacities.length;
+    assertEquals(numOfDatanodes, racks.length);
+    cluster = new MiniDFSCluster(0, conf, capacities.length, true, true, null, 
+        racks, capacities);
+    try {
+      cluster.waitActive();
+      client = DFSClient.createNamenode(conf);
+
+      long totalCapacity=0L;
+      for(long capacity:capacities) {
+        totalCapacity += capacity;
+      }
+      // fill up the cluster to be 30% full
+      long totalUsedSpace = totalCapacity*3/10;
+      createFile(totalUsedSpace/numOfDatanodes, (short)numOfDatanodes);
+      // start up an empty node with the same capacity and on the same rack
+      cluster.startDataNodes(conf, 1, true, null,
+          new String[]{newRack}, new long[]{newCapacity});
+
+      totalCapacity += newCapacity;
+
+      // run balancer and validate results
+      runBalancer(conf, totalUsedSpace, totalCapacity);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  /* Start balancer and check if the cluster is balanced after the run */
+  private void runBalancer(Configuration conf, long totalUsedSpace, long totalCapacity )
+  throws Exception {
+    waitForHeartBeat(totalUsedSpace, totalCapacity);
+
+    // start rebalancing
+    balancer = new Balancer(conf);
+    balancer.run(new String[0]);
+
+    waitForHeartBeat(totalUsedSpace, totalCapacity);
+    boolean balanced;
+    do {
+      DatanodeInfo[] datanodeReport = 
+        client.getDatanodeReport(DatanodeReportType.ALL);
+      assertEquals(datanodeReport.length, cluster.getDataNodes().size());
+      balanced = true;
+      double avgUtilization = ((double)totalUsedSpace)/totalCapacity*100;
+      for(DatanodeInfo datanode:datanodeReport) {
+        if(Math.abs(avgUtilization-
+            ((double)datanode.getDfsUsed())/datanode.getCapacity()*100)>10) {
+          balanced = false;
+          try {
+            Thread.sleep(100);
+          } catch(InterruptedException ignored) {
+          }
+          break;
+        }
+      }
+    } while(!balanced);
+
+  }
+  /** Test a cluster with even distribution, 
+   * then a new empty node is added to the cluster*/
+  public void testBalancer0() throws Exception {
+    Configuration conf = new Configuration();
+    initConf(conf);
+    /** one-node cluster test*/
+    // add an empty node with half of the CAPACITY & the same rack
+    test(conf, new long[]{CAPACITY}, new String[]{RACK0}, CAPACITY/2, RACK0);
+
+    /** two-node cluster test */
+    test(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
+        CAPACITY, RACK2);
+    
+    /** End-to-end testing of access token, involving NN, DN, and Balancer */
+    Configuration newConf = new Configuration(conf);
+    newConf.setBoolean(AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, true);
+    test(newConf, new long[]{CAPACITY}, new String[]{RACK0}, CAPACITY/2, RACK0);
+  }
+
+  /** Test unevenly distributed cluster */
+  public void testBalancer1() throws Exception {
+    Configuration conf = new Configuration();
+    initConf(conf);
+    testUnevenDistribution(conf,
+        new long[] {50*CAPACITY/100, 10*CAPACITY/100},
+        new long[]{CAPACITY, CAPACITY},
+        new String[] {RACK0, RACK1});
+  }
+
+  /**
+   * @param args
+   */
+  public static void main(String[] args) throws Exception {
+    TestBalancer balancerTest = new TestBalancer();
+    balancerTest.testBalancer0();
+    balancerTest.testBalancer1();
+  }
+}
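
For clarity on the exit condition in runBalancer() above: the cluster counts as balanced once every datanode's DFS utilization is within 10 percentage points of the cluster-wide average. A small, self-contained illustration with made-up numbers:

    // Made-up numbers illustrating the 10-percentage-point criterion from runBalancer().
    public class BalanceCheckExample {
      public static void main(String[] args) {
        long totalUsedSpace = 300L;            // cluster-wide used space
        long totalCapacity = 1000L;            // cluster-wide capacity
        double avgUtilization = ((double) totalUsedSpace) / totalCapacity * 100;  // 30.0
        long nodeUsed = 450L;                  // one datanode's used space
        long nodeCapacity = 1000L;             // that datanode's capacity
        double nodeUtilization = ((double) nodeUsed) / nodeCapacity * 100;        // 45.0
        // |30 - 45| = 15 > 10, so this node alone keeps the loop in runBalancer() spinning.
        boolean withinThreshold = Math.abs(avgUtilization - nodeUtilization) <= 10;
        System.out.println("within threshold: " + withinThreshold);               // false
      }
    }
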

+ 239 - 0
src/test/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java

@@ -0,0 +1,239 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements.  See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership.  The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.hdfs.server.common;
+
+import java.io.IOException;
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+import static org.apache.hadoop.hdfs.protocol.FSConstants.LAYOUT_VERSION;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode;
+import org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+
+/**
+ */
+public class TestDistributedUpgrade extends TestCase {
+  private static final Log LOG = LogFactory.getLog(TestDistributedUpgrade.class);
+  private Configuration conf;
+  private int testCounter = 0;
+  private MiniDFSCluster cluster = null;
+    
+  /**
+   * Writes an INFO log message containing the parameters.
+   */
+  void log(String label, int numDirs) {
+    LOG.info("============================================================");
+    LOG.info("***TEST " + (testCounter++) + "*** " 
+             + label + ":"
+             + " numDirs="+numDirs);
+  }
+  
+  /**
+   * Attempts to start a NameNode with the given operation.  Starting
+   * the NameNode should throw an exception.
+   */
+  void startNameNodeShouldFail(StartupOption operation) {
+    try {
+      //cluster = new MiniDFSCluster(conf, 0, operation); // should fail
+      // we set manage dirs to true as NN has to start from untar'ed image with 
+      // nn dirs set to name1 and name2
+      cluster = new MiniDFSCluster(0, conf, 0, false, true,
+          operation, null); // Should fail
+      throw new AssertionError("NameNode should have failed to start");
+    } catch (Exception expected) {
+      // expected: startup must fail with this option
+    }
+  }
+  
+  /**
+   * Attempts to start a DataNode with the given operation.  Starting
+   * the DataNode should throw an exception.
+   */
+  void startDataNodeShouldFail(StartupOption operation) {
+    try {
+      cluster.startDataNodes(conf, 1, false, operation, null); // should fail
+      throw new AssertionError("DataNode should have failed to start");
+    } catch (Exception expected) {
+      // expected
+      assertFalse(cluster.isDataNodeUp());
+    }
+  }
+ 
+  /**
+   */
+  public void testDistributedUpgrade() throws Exception {
+    int numDirs = 1;
+    TestDFSUpgradeFromImage testImg = new TestDFSUpgradeFromImage();
+    testImg.unpackStorage();
+    int numDNs = testImg.numDataNodes;
+
+    // register new upgrade objects (ignore all existing)
+    UpgradeObjectCollection.initialize();
+    UpgradeObjectCollection.registerUpgrade(new UO_Datanode1());
+    UpgradeObjectCollection.registerUpgrade(new UO_Namenode1());
+    UpgradeObjectCollection.registerUpgrade(new UO_Datanode2());
+    UpgradeObjectCollection.registerUpgrade(new UO_Namenode2());
+    UpgradeObjectCollection.registerUpgrade(new UO_Datanode3());
+    UpgradeObjectCollection.registerUpgrade(new UO_Namenode3());
+
+    conf = new Configuration();
+    if (System.getProperty("test.build.data") == null) { // to test to be run outside of ant
+      System.setProperty("test.build.data", "build/test/data");
+    }
+    conf.setInt("dfs.datanode.scan.period.hours", -1); // block scanning off
+
+    log("NameNode start in regular mode when dustributed upgrade is required", numDirs);
+    startNameNodeShouldFail(StartupOption.REGULAR);
+
+    log("Start NameNode only distributed upgrade", numDirs);
+    // cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+    cluster = new MiniDFSCluster(0, conf, 0, false, true,
+                                  StartupOption.UPGRADE, null);
+    cluster.shutdown();
+
+    log("NameNode start in regular mode when dustributed upgrade has been started", numDirs);
+    startNameNodeShouldFail(StartupOption.REGULAR);
+
+    log("NameNode rollback to the old version that require a dustributed upgrade", numDirs);
+    startNameNodeShouldFail(StartupOption.ROLLBACK);
+
+    log("Normal distributed upgrade for the cluster", numDirs);
+    cluster = new MiniDFSCluster(0, conf, numDNs, false, true,
+                                  StartupOption.UPGRADE, null);
+    DFSAdmin dfsAdmin = new DFSAdmin();
+    dfsAdmin.setConf(conf);
+    dfsAdmin.run(new String[] {"-safemode", "wait"});
+    cluster.shutdown();
+
+    // it should be ok to start in regular mode
+    log("NameCluster regular startup after the upgrade", numDirs);
+    cluster = new MiniDFSCluster(0, conf, numDNs, false, true,
+                                  StartupOption.REGULAR, null);
+    cluster.waitActive();
+    cluster.shutdown();
+  }
+
+  public static void main(String[] args) throws Exception {
+    new TestDistributedUpgrade().testDistributedUpgrade();
+    LOG.info("=== DONE ===");
+  }
+}
+
+/**
+ * Upgrade object for data-node
+ */
+class UO_Datanode extends UpgradeObjectDatanode {
+  int version;
+
+  UO_Datanode(int v) {
+    this.status = (short)0;
+    version = v;
+  }
+
+  public int getVersion() {
+    return version;
+  }
+
+  public void doUpgrade() throws IOException {
+    this.status = (short)100;
+    getDatanode().namenode.processUpgradeCommand(
+        new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS, 
+            getVersion(), getUpgradeStatus()));
+  }
+
+  public UpgradeCommand startUpgrade() throws IOException {
+    return null;
+  }
+}
+
+/**
+ * Upgrade object for name-node
+ */
+class UO_Namenode extends UpgradeObjectNamenode {
+  int version;
+
+  UO_Namenode(int v) {
+    status = (short)0;
+    version = v;
+  }
+
+  public int getVersion() {
+    return version;
+  }
+
+  synchronized public UpgradeCommand processUpgradeCommand(
+                                  UpgradeCommand command) throws IOException {
+    switch(command.getAction()) {
+      case UpgradeCommand.UC_ACTION_REPORT_STATUS:
+        this.status += command.getCurrentStatus()/8;  // 4 reports needed
+        break;
+      default:
+        this.status++;
+    }
+    return null;
+  }
+
+  public UpgradeCommand completeUpgrade() throws IOException {
+    return null;
+  }
+}
+
+class UO_Datanode1 extends UO_Datanode {
+  UO_Datanode1() {
+    super(LAYOUT_VERSION+1);
+  }
+}
+
+class UO_Namenode1 extends UO_Namenode {
+  UO_Namenode1() {
+    super(LAYOUT_VERSION+1);
+  }
+}
+
+class UO_Datanode2 extends UO_Datanode {
+  UO_Datanode2() {
+    super(LAYOUT_VERSION+2);
+  }
+}
+
+class UO_Namenode2 extends UO_Namenode {
+  UO_Namenode2() {
+    super(LAYOUT_VERSION+2);
+  }
+}
+
+class UO_Datanode3 extends UO_Datanode {
+  UO_Datanode3() {
+    super(LAYOUT_VERSION+3);
+  }
+}
+
+class UO_Namenode3 extends UO_Namenode {
+  UO_Namenode3() {
+    super(LAYOUT_VERSION+3);
+  }
+}

+ 658 - 0
src/test/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

@@ -0,0 +1,658 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Random;
+
+import javax.management.NotCompliantMBeanException;
+import javax.management.ObjectName;
+import javax.management.StandardMBean;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.metrics.util.MBeanUtil;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+
+/**
+ * This class implements a simulated FSDataset.
+ * 
+ * Blocks that are created are recorded but their data (plus their CRCs) are
+ *  discarded.
+ * Fixed data is returned when blocks are read; a null CRC meta file is
+ * created for such data.
+ * 
+ * This FSDataset does not remember any block information across its
+ * restarts; it does, however, offer an operation to inject blocks
+ * (see TestInjectionForSimulatedStorage
+ * for a usage example of injection).
+ * 
+ * Note the synchronization is coarse grained - it is at each method. 
+ */
+
+public class SimulatedFSDataset  implements FSConstants, FSDatasetInterface, Configurable{
+  
+  public static final String CONFIG_PROPERTY_SIMULATED =
+                                    "dfs.datanode.simulateddatastorage";
+  public static final String CONFIG_PROPERTY_CAPACITY =
+                            "dfs.datanode.simulateddatastorage.capacity";
+  
+  public static final long DEFAULT_CAPACITY = 2L<<40; // 2 terabytes
+  public static final byte DEFAULT_DATABYTE = 9; // value returned for every byte of simulated block data
+  byte simulatedDataByte = DEFAULT_DATABYTE;
+  Configuration conf = null;
+  
+  static byte[] nullCrcFileData;
+  {
+    DataChecksum checksum = DataChecksum.newDataChecksum( DataChecksum.
+                              CHECKSUM_NULL, 16*1024 );
+    byte[] nullCrcHeader = checksum.getHeader();
+    nullCrcFileData =  new byte[2 + nullCrcHeader.length];
+    nullCrcFileData[0] = (byte) ((FSDataset.METADATA_VERSION >>> 8) & 0xff);
+    nullCrcFileData[1] = (byte) (FSDataset.METADATA_VERSION & 0xff);
+    for (int i = 0; i < nullCrcHeader.length; i++) {
+      nullCrcFileData[i+2] = nullCrcHeader[i];
+    }
+  }
+  
+  private class BInfo { // information about a single block
+    Block theBlock;
+    private boolean finalized = false; // if not finalized => ongoing creation
+    SimulatedOutputStream oStream = null;
+    BInfo(Block b, boolean forWriting) throws IOException {
+      theBlock = new Block(b);
+      if (theBlock.getNumBytes() < 0) {
+        theBlock.setNumBytes(0);
+      }
+      if (!storage.alloc(theBlock.getNumBytes())) { // expected length - actual length may
+                                          // be more - we find out at finalize
+        DataNode.LOG.warn("Lack of free storage on a block alloc");
+        throw new IOException("Creating block, no free space available");
+      }
+
+      if (forWriting) {
+        finalized = false;
+        oStream = new SimulatedOutputStream();
+      } else {
+        finalized = true;
+        oStream = null;
+      }
+    }
+
+    synchronized long getGenerationStamp() {
+      return theBlock.getGenerationStamp();
+    }
+
+    synchronized void updateBlock(Block b) {
+      theBlock.setGenerationStamp(b.getGenerationStamp());
+      setlength(b.getNumBytes());
+    }
+    
+    synchronized long getlength() {
+      if (!finalized) {
+         return oStream.getLength();
+      } else {
+        return theBlock.getNumBytes();
+      }
+    }
+
+    synchronized void setlength(long length) {
+      if (!finalized) {
+         oStream.setLength(length);
+      } else {
+        theBlock.setNumBytes(length);
+      }
+    }
+    
+    synchronized SimulatedInputStream getIStream() throws IOException {
+      if (!finalized) {
+        // throw new IOException("Trying to read an unfinalized block");
+         return new SimulatedInputStream(oStream.getLength(), DEFAULT_DATABYTE);
+      } else {
+        return new SimulatedInputStream(theBlock.getNumBytes(), DEFAULT_DATABYTE);
+      }
+    }
+    
+    synchronized void finalizeBlock(long finalSize) throws IOException {
+      if (finalized) {
+        throw new IOException(
+            "Finalizing a block that has already been finalized" + 
+            theBlock.getBlockId());
+      }
+      if (oStream == null) {
+        DataNode.LOG.error("Null oStream on unfinalized block - bug");
+        throw new IOException("Unexpected error on finalize");
+      }
+
+      if (oStream.getLength() != finalSize) {
+        DataNode.LOG.warn("Size passed to finalize (" + finalSize +
+                    ")does not match what was written:" + oStream.getLength());
+        throw new IOException(
+          "Size passed to finalize does not match the amount of data written");
+      }
+      // We had allocated the expected length when block was created; 
+      // adjust if necessary
+      long extraLen = finalSize - theBlock.getNumBytes();
+      if (extraLen > 0) {
+        if (!storage.alloc(extraLen)) {
+          DataNode.LOG.warn("Lack of free storage on a block alloc");
+          throw new IOException("Creating block, no free space available");
+        }
+      } else {
+        storage.free(-extraLen);
+      }
+      theBlock.setNumBytes(finalSize);  
+
+      finalized = true;
+      oStream = null;
+      return;
+    }
+    
+    SimulatedInputStream getMetaIStream() {
+      return new SimulatedInputStream(nullCrcFileData);  
+    }
+
+    synchronized boolean isFinalized() {
+      return finalized;
+    }
+  }
+  
+  static private class SimulatedStorage {
+    private long capacity;  // in bytes
+    private long used;    // in bytes
+    
+    synchronized long getFree() {
+      return capacity - used;
+    }
+    
+    synchronized long getCapacity() {
+      return capacity;
+    }
+    
+    synchronized long getUsed() {
+      return used;
+    }
+    
+    synchronized boolean alloc(long amount) {
+      if (getFree() >= amount) {
+        used += amount;
+        return true;
+      } else {
+        return false;    
+      }
+    }
+    
+    synchronized void free(long amount) {
+      used -= amount;
+    }
+    
+    SimulatedStorage(long cap) {
+      capacity = cap;
+      used = 0;   
+    }
+  }
+  
+  private HashMap<Block, BInfo> blockMap = null;
+  private SimulatedStorage storage = null;
+  private String storageId;
+  
+  public SimulatedFSDataset(Configuration conf) throws IOException {
+    setConf(conf);
+  }
+  
+  private SimulatedFSDataset() { // real construction happens when setConf() is called
+  }
+  
+  public Configuration getConf() {
+    return conf;
+  }
+
+  public void setConf(Configuration iconf)  {
+    conf = iconf;
+    storageId = conf.get("StorageId", "unknownStorageId" +
+                                        new Random().nextInt());
+    registerMBean(storageId);
+    storage = new SimulatedStorage(
+        conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY));
+    //DataNode.LOG.info("Starting Simulated storage; Capacity = " + getCapacity() + 
+    //    "Used = " + getDfsUsed() + "Free =" + getRemaining());
+
+    blockMap = new HashMap<Block,BInfo>(); 
+  }
+
+  public synchronized void injectBlocks(Block[] injectBlocks)
+                                            throws IOException {
+    if (injectBlocks != null) {
+      for (Block b: injectBlocks) { // if any blocks in list is bad, reject list
+        if (b == null) {
+          throw new NullPointerException("Null blocks in block list");
+        }
+        if (isValidBlock(b)) {
+          throw new IOException("Block already exists in  block list");
+        }
+      }
+      HashMap<Block, BInfo> oldBlockMap = blockMap;
+      blockMap = 
+          new HashMap<Block,BInfo>(injectBlocks.length + oldBlockMap.size());
+      blockMap.putAll(oldBlockMap);
+      for (Block b: injectBlocks) {
+          BInfo binfo = new BInfo(b, false);
+          blockMap.put(b, binfo);
+      }
+    }
+  }
+
+  public synchronized void finalizeBlock(Block b) throws IOException {
+    BInfo binfo = blockMap.get(b);
+    if (binfo == null) {
+      throw new IOException("Finalizing a non existing block " + b);
+    }
+    binfo.finalizeBlock(b.getNumBytes());
+
+  }
+
+  public synchronized void unfinalizeBlock(Block b) throws IOException {
+    if (isBeingWritten(b)) {
+      blockMap.remove(b);
+    }
+  }
+
+  public synchronized Block[] getBlockReport() {
+    Block[] blockTable = new Block[blockMap.size()];
+    int count = 0;
+    for (BInfo b : blockMap.values()) {
+      if (b.isFinalized()) {
+        blockTable[count++] = b.theBlock;
+      }
+    }
+    if (count != blockTable.length) {
+      blockTable = Arrays.copyOf(blockTable, count);
+    }
+    return blockTable;
+  }
+
+  public long getCapacity() throws IOException {
+    return storage.getCapacity();
+  }
+
+  public long getDfsUsed() throws IOException {
+    return storage.getUsed();
+  }
+
+  public long getRemaining() throws IOException {
+    return storage.getFree();
+  }
+
+  public synchronized long getLength(Block b) throws IOException {
+    BInfo binfo = blockMap.get(b);
+    if (binfo == null) {
+      throw new IOException("Finalizing a non existing block " + b);
+    }
+    return binfo.getlength();
+  }
+
+  /** {@inheritDoc} */
+  public Block getStoredBlock(long blkid) throws IOException {
+    Block b = new Block(blkid);
+    BInfo binfo = blockMap.get(b);
+    if (binfo == null) {
+      return null;
+    }
+    b.setGenerationStamp(binfo.getGenerationStamp());
+    b.setNumBytes(binfo.getlength());
+    return b;
+  }
+
+  /** {@inheritDoc} */
+  public void updateBlock(Block oldblock, Block newblock) throws IOException {
+    BInfo binfo = blockMap.get(newblock);
+    if (binfo == null) {
+      throw new IOException("BInfo not found, b=" + newblock);
+    }
+    binfo.updateBlock(newblock);
+  }
+
+  public synchronized void invalidate(Block[] invalidBlks) throws IOException {
+    boolean error = false;
+    if (invalidBlks == null) {
+      return;
+    }
+    for (Block b: invalidBlks) {
+      if (b == null) {
+        continue;
+      }
+      BInfo binfo = blockMap.get(b);
+      if (binfo == null) {
+        error = true;
+        DataNode.LOG.warn("Invalidate: Missing block");
+        continue;
+      }
+      storage.free(binfo.getlength());
+      blockMap.remove(b);
+    }
+    if (error) {
+      throw new IOException("Invalidate: Missing blocks.");
+    }
+  }
+
+  public synchronized boolean isValidBlock(Block b) {
+    // return (blockMap.containsKey(b));
+    BInfo binfo = blockMap.get(b);
+    if (binfo == null) {
+      return false;
+    }
+    return binfo.isFinalized();
+  }
+
+  /* check if a block is created but not finalized */
+  private synchronized boolean isBeingWritten(Block b) {
+    BInfo binfo = blockMap.get(b);
+    if (binfo == null) {
+      return false;
+    }
+    return !binfo.isFinalized();  
+  }
+  
+  public String toString() {
+    return getStorageInfo();
+  }
+
+  public synchronized BlockWriteStreams writeToBlock(Block b, 
+                                            boolean isRecovery)
+                                            throws IOException {
+    if (isValidBlock(b)) {
+      throw new BlockAlreadyExistsException("Block " + b + 
+          " is valid, and cannot be written to.");
+    }
+    if (isBeingWritten(b)) {
+      throw new BlockAlreadyExistsException("Block " + b + 
+          " is being written, and cannot be written to.");
+    }
+    BInfo binfo = new BInfo(b, true);
+    blockMap.put(b, binfo);
+    SimulatedOutputStream crcStream = new SimulatedOutputStream();
+    return new BlockWriteStreams(binfo.oStream, crcStream);
+  }
+
+  public synchronized InputStream getBlockInputStream(Block b)
+                                            throws IOException {
+    BInfo binfo = blockMap.get(b);
+    if (binfo == null) {
+      throw new IOException("No such Block " + b );  
+    }
+    
+    //DataNode.LOG.info("Opening block(" + b.blkid + ") of length " + b.len);
+    return binfo.getIStream();
+  }
+  
+  public synchronized InputStream getBlockInputStream(Block b, long seekOffset)
+                              throws IOException {
+    InputStream result = getBlockInputStream(b);
+    result.skip(seekOffset);
+    return result;
+  }
+
+  /** Not supported */
+  public BlockInputStreams getTmpInputStreams(Block b, long blkoff, long ckoff
+      ) throws IOException {
+    throw new IOException("Not supported");
+  }
+
+  /** No-op */
+  public void validateBlockMetadata(Block b) {
+  }
+
+  /**
+   * Returns metaData of block b as an input stream
+   * @param b - the block for which the metadata is desired
+   * @return metaData of block b as an input stream
+   * @throws IOException - block does not exist or problems accessing
+   *  the meta file
+   */
+  private synchronized InputStream getMetaDataInStream(Block b)
+                                              throws IOException {
+    BInfo binfo = blockMap.get(b);
+    if (binfo == null) {
+      throw new IOException("No such Block " + b );  
+    }
+    if (!binfo.finalized) {
+      throw new IOException("Block " + b + 
+          " is being written, its meta cannot be read");
+    }
+    return binfo.getMetaIStream();
+  }
+
+  public synchronized long getMetaDataLength(Block b) throws IOException {
+    BInfo binfo = blockMap.get(b);
+    if (binfo == null) {
+      throw new IOException("No such Block " + b );  
+    }
+    if (!binfo.finalized) {
+      throw new IOException("Block " + b +
+          " is being written, its metalength cannot be read");
+    }
+    return binfo.getMetaIStream().getLength();
+  }
+  
+  public MetaDataInputStream getMetaDataInputStream(Block b)
+  throws IOException {
+
+       return new MetaDataInputStream(getMetaDataInStream(b),
+                                                getMetaDataLength(b));
+  }
+
+  public synchronized boolean metaFileExists(Block b) throws IOException {
+    if (!isValidBlock(b)) {
+      throw new IOException("Block " + b +
+          " is not a valid block; its meta file does not exist.");
+    }
+    return true; // crc exists for all valid blocks
+  }
+
+  public void checkDataDir() throws DiskErrorException {
+    // nothing to check for simulated data set
+  }
+
+  public synchronized long getChannelPosition(Block b, 
+                                              BlockWriteStreams stream)
+                                              throws IOException {
+    BInfo binfo = blockMap.get(b);
+    if (binfo == null) {
+      throw new IOException("No such Block " + b );
+    }
+    return binfo.getlength();
+  }
+
+  public synchronized void setChannelPosition(Block b, BlockWriteStreams stream, 
+                                              long dataOffset, long ckOffset)
+                                              throws IOException {
+    BInfo binfo = blockMap.get(b);
+    if (binfo == null) {
+      throw new IOException("No such Block " + b );
+    }
+    binfo.setlength(dataOffset);
+  }
+
+  /** 
+   * Simulated input and output streams
+   *
+   */
+  static private class SimulatedInputStream extends java.io.InputStream {
+    
+
+    byte theRepeatedData = 7;
+    long length; // bytes
+    int currentPos = 0;
+    byte[] data = null;
+    
+    /**
+     * An input stream of size l with repeated bytes
+     * @param l
+     * @param iRepeatedData
+     */
+    SimulatedInputStream(long l, byte iRepeatedData) {
+      length = l;
+      theRepeatedData = iRepeatedData;
+    }
+    
+    /**
+     * An input stream of the supplied data
+     * 
+     * @param iData
+     */
+    SimulatedInputStream(byte[] iData) {
+      data = iData;
+      length = data.length;
+      
+    }
+    
+    /**
+     * 
+     * @return the length of the input stream
+     */
+    long getLength() {
+      return length;
+    }
+
+    @Override
+    public int read() throws IOException {
+      if (currentPos >= length)
+        return -1;
+      if (data !=null) {
+        return data[currentPos++];
+      } else {
+        currentPos++;
+        return theRepeatedData;
+      }
+    }
+    
+    @Override
+    public int read(byte[] b) throws IOException { 
+
+      if (b == null) {
+        throw new NullPointerException();
+      }
+      if (b.length == 0) {
+        return 0;
+      }
+      if (currentPos >= length) { // EOF
+        return -1;
+      }
+      int bytesRead = (int) Math.min(b.length, length-currentPos);
+      if (data != null) {
+        System.arraycopy(data, currentPos, b, 0, bytesRead);
+      } else { // fill with the repeated data byte
+        for (int i = 0; i < bytesRead; i++) {
+          b[i] = theRepeatedData;
+        }
+      }
+      currentPos += bytesRead;
+      return bytesRead;
+    }
+  }
+  
+  /**
+   * This class implements an output stream that merely throws its data away, but records its
+   * length.
+   *
+   */
+  static private class SimulatedOutputStream extends OutputStream {
+    long length = 0;
+    
+    /**
+     * constructor for SimulatedOutputStream
+     */
+    SimulatedOutputStream() {
+    }
+    
+    /**
+     * 
+     * @return the length of the data created so far.
+     */
+    long getLength() {
+      return length;
+    }
+
+    /**
+     */
+    void setLength(long length) {
+      this.length = length;
+    }
+    
+    @Override
+    public void write(int arg0) throws IOException {
+      length++;
+    }
+    
+    @Override
+    public void write(byte[] b) throws IOException {
+      length += b.length;
+    }
+    
+    @Override
+    public void write(byte[] b,
+              int off,
+              int len) throws IOException  {
+      length += len;
+    }
+  }
+  
+  private ObjectName mbeanName;
+
+
+  
+  /**
+   * Register the FSDataset MBean using the name
+   *        "hadoop:service=DataNode,name=FSDatasetState-<storageid>"
+   *  We use storage id for MBean name since a minicluster within a single
+   * Java VM may have multiple Simulated Datanodes.
+   */
+  void registerMBean(final String storageId) {
+    // We wrap to bypass the standard MBean naming convention.
+    // This wrapping can be removed in Java 6, as it is more flexible in 
+    // package naming for mbeans and their impl.
+    StandardMBean bean;
+
+    try {
+      bean = new StandardMBean(this,FSDatasetMBean.class);
+      mbeanName = MBeanUtil.registerMBean("DataNode",
+          "FSDatasetState-" + storageId, bean);
+    } catch (NotCompliantMBeanException e) {
+      e.printStackTrace();
+    }
+ 
+    DataNode.LOG.info("Registered FSDatasetStatusMBean");
+  }
+
+  public void shutdown() {
+    if (mbeanName != null)
+      MBeanUtil.unregisterMBean(mbeanName);
+  }
+
+  public String getStorageInfo() {
+    return "Simulated FSDataset-" + storageId;
+  }
+}
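
A hedged sketch of how a test typically enables this simulated dataset through the configuration keys defined above; the 1 GB capacity is an arbitrary example value, and imports of Configuration, MiniDFSCluster, and SimulatedFSDataset are omitted for brevity:

    // Assumed test setup: turn on the simulated dataset before starting a MiniDFSCluster.
    Configuration conf = new Configuration();
    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);  // use SimulatedFSDataset
    conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 1L << 30);  // 1 GB per simulated datanode (example)
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
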

+ 254 - 0
src/test/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java

@@ -0,0 +1,254 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.Util;
+import org.apache.hadoop.hdfs.server.datanode.BlockTransferThrottler;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessToken;
+/**
+ * This class tests if block replacement request to data nodes work correctly.
+ */
+public class TestBlockReplacement extends TestCase {
+  private static final Log LOG = LogFactory.getLog(
+  "org.apache.hadoop.hdfs.TestBlockReplacement");
+
+  MiniDFSCluster cluster;
+  public void testThrottler() throws IOException {
+    Configuration conf = new Configuration();
+    FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+    long bandwidthPerSec = 1024*1024L;
+    final long TOTAL_BYTES =6*bandwidthPerSec; 
+    long bytesToSend = TOTAL_BYTES; 
+    long start = Util.now();
+    BlockTransferThrottler throttler = new BlockTransferThrottler(bandwidthPerSec);
+    long bytesSent = 1024*512L; // 0.5MB
+    throttler.throttle(bytesSent);
+    bytesToSend -= bytesSent;
+    bytesSent = 1024*768L; // 0.75MB
+    throttler.throttle(bytesSent);
+    bytesToSend -= bytesSent;
+    try {
+      Thread.sleep(1000);
+    } catch (InterruptedException ignored) {}
+    throttler.throttle(bytesToSend);
+    long end = Util.now();
+    assertTrue(TOTAL_BYTES*1000/(end-start) <= bandwidthPerSec);
+  }
+  
+  public void testBlockReplacement() throws IOException {
+    final Configuration CONF = new Configuration();
+    final String[] INITIAL_RACKS = {"/RACK0", "/RACK1", "/RACK2"};
+    final String[] NEW_RACKS = {"/RACK2"};
+
+    final short REPLICATION_FACTOR = (short)3;
+    final int DEFAULT_BLOCK_SIZE = 1024;
+    final Random r = new Random();
+    
+    CONF.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    CONF.setInt("io.bytes.per.checksum", DEFAULT_BLOCK_SIZE/2);
+    CONF.setLong("dfs.blockreport.intervalMsec",500);
+    cluster = new MiniDFSCluster(
+          CONF, REPLICATION_FACTOR, true, INITIAL_RACKS );
+    try {
+      cluster.waitActive();
+      
+      FileSystem fs = cluster.getFileSystem();
+      Path fileName = new Path("/tmp.txt");
+      
+      // create a file with one block
+      DFSTestUtil.createFile(fs, fileName,
+          DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, r.nextLong());
+      DFSTestUtil.waitReplication(fs,fileName, REPLICATION_FACTOR);
+      
+      // get all datanodes
+      InetSocketAddress addr = new InetSocketAddress("localhost",
+          cluster.getNameNodePort());
+      DFSClient client = new DFSClient(addr, CONF);
+      List<LocatedBlock> locatedBlocks = client.namenode.
+        getBlockLocations("/tmp.txt", 0, DEFAULT_BLOCK_SIZE).getLocatedBlocks();
+      assertEquals(1, locatedBlocks.size());
+      LocatedBlock block = locatedBlocks.get(0);
+      DatanodeInfo[]  oldNodes = block.getLocations();
+      assertEquals(oldNodes.length, 3);
+      Block b = block.getBlock();
+      
+      // add a new datanode to the cluster
+      cluster.startDataNodes(CONF, 1, true, null, NEW_RACKS);
+      cluster.waitActive();
+      
+      DatanodeInfo[] datanodes = client.datanodeReport(DatanodeReportType.ALL);
+
+      // find out the new node
+      DatanodeInfo newNode=null;
+      for(DatanodeInfo node:datanodes) {
+        boolean isNewNode = true;
+        for(DatanodeInfo oldNode:oldNodes) {
+          if(node.equals(oldNode)) {
+            isNewNode = false;
+            break;
+          }
+        }
+        if(isNewNode) {
+          newNode = node;
+          break;
+        }
+      }
+      
+      assertTrue(newNode!=null);
+      DatanodeInfo source=null;
+      ArrayList<DatanodeInfo> proxies = new ArrayList<DatanodeInfo>(2);
+      for(DatanodeInfo node:datanodes) {
+        if(node != newNode) {
+          if( node.getNetworkLocation().equals(newNode.getNetworkLocation())) {
+            source = node;
+          } else {
+            proxies.add( node );
+          }
+        }
+      }
+      assertTrue(source!=null && proxies.size()==2);
+      
+      // start to replace the block
+      // case 1: proxySource does not contain the block
+      LOG.info("Testcase 1: Proxy " + newNode.getName() 
+          + " does not contain the block " + b.getBlockName() );
+      assertFalse(replaceBlock(b, source, newNode, proxies.get(0)));
+      // case 2: destination contains the block
+      LOG.info("Testcase 2: Destination " + proxies.get(1).getName() 
+          + " contains the block " + b.getBlockName() );
+      assertFalse(replaceBlock(b, source, proxies.get(0), proxies.get(1)));
+      // case 3: correct case
+      LOG.info("Testcase 3: Proxy=" + source.getName() + " source=" + 
+          proxies.get(0).getName() + " destination=" + newNode.getName() );
+      assertTrue(replaceBlock(b, source, proxies.get(0), newNode));
+      // block locations should contain two proxies and newNode
+      checkBlocks(new DatanodeInfo[]{newNode, proxies.get(0), proxies.get(1)},
+          fileName.toString(), 
+          DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
+      // case 4: proxies.get(0) is not a valid del hint
+      LOG.info("Testcase 4: invalid del hint " + proxies.get(0).getName() );
+      assertTrue(replaceBlock(b, proxies.get(1), proxies.get(0), source));
+      /* block locations should contain two proxies,
+       * and either of source or newNode
+       */
+      checkBlocks(proxies.toArray(new DatanodeInfo[proxies.size()]), 
+          fileName.toString(), 
+          DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+  
+  /* check if file's blocks exist at includeNodes */
+  private void checkBlocks(DatanodeInfo[] includeNodes, String fileName, 
+      long fileLen, short replFactor, DFSClient client) throws IOException {
+    boolean notDone;
+    do {
+      try {
+        Thread.sleep(100);
+      } catch(InterruptedException e) {
+      }
+      List<LocatedBlock> blocks = client.namenode.
+      getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
+      assertEquals(1, blocks.size());
+      DatanodeInfo[] nodes = blocks.get(0).getLocations();
+      notDone = (nodes.length != replFactor);
+      if (notDone) {
+        LOG.info("Expected replication factor is " + replFactor +
+            " but the real replication factor is " + nodes.length );
+      } else {
+        List<DatanodeInfo> nodeLocations = Arrays.asList(nodes);
+        for (DatanodeInfo node : includeNodes) {
+          if (!nodeLocations.contains(node) ) {
+            notDone=true; 
+            LOG.info("Block is not located at " + node.getName() );
+            break;
+          }
+        }
+      }
+    } while(notDone);
+  }
+
+  /* Copy a block from sourceProxy to destination. If the block becomes
+   * over-replicated, preferably remove it from source.
+   * 
+   * Return true if a block is successfully copied; otherwise false.
+   */
+  private boolean replaceBlock( Block block, DatanodeInfo source,
+      DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
+    Socket sock = new Socket();
+    sock.connect(NetUtils.createSocketAddr(
+        destination.getName()), HdfsConstants.READ_TIMEOUT);
+    sock.setKeepAlive(true);
+    // sendRequest
+    DataOutputStream out = new DataOutputStream(sock.getOutputStream());
+    out.writeShort(DataTransferProtocol.DATA_TRANSFER_VERSION);
+    out.writeByte(DataTransferProtocol.OP_REPLACE_BLOCK);
+    out.writeLong(block.getBlockId());
+    out.writeLong(block.getGenerationStamp());
+    Text.writeString(out, source.getStorageID());
+    sourceProxy.write(out);
+    AccessToken.DUMMY_TOKEN.write(out);
+    out.flush();
+    // receiveResponse
+    DataInputStream reply = new DataInputStream(sock.getInputStream());
+
+    short status = reply.readShort();
+    if(status == DataTransferProtocol.OP_STATUS_SUCCESS) {
+      return true;
+    }
+    return false;
+  }
+
+  /**
+   * @param args
+   */
+  public static void main(String[] args) throws Exception {
+    (new TestBlockReplacement()).testBlockReplacement();
+  }
+
+}
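
As a sanity check on the arithmetic in testThrottler() above: with a bandwidth of 1 MB/s and roughly 6 MB to send, the throttled transfer cannot complete in under about six seconds, which is why the measured rate stays at or below the configured bandwidth. A minimal, self-contained illustration (constants mirror those in the test):

    public class ThrottleMathExample {
      public static void main(String[] args) {
        long bandwidthPerSec = 1024 * 1024L;                        // 1 MB/s
        long totalBytes = 6 * bandwidthPerSec;                      // ~6 MB to send
        long minElapsedMs = totalBytes * 1000 / bandwidthPerSec;    // lower bound on elapsed time
        System.out.println("minimum elapsed ms = " + minElapsedMs); // 6000
      }
    }
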

+ 50 - 0
src/test/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java

@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.util.List;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
+import org.apache.hadoop.conf.Configuration;
+import junit.framework.TestCase;
+
+public class TestDataNodeMetrics extends TestCase {
+  
+  public void testDataNodeMetrics() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    try {
+      FileSystem fs = cluster.getFileSystem();
+      final long LONG_FILE_LEN = Integer.MAX_VALUE+1L; 
+      DFSTestUtil.createFile(fs, new Path("/tmp.txt"),
+          LONG_FILE_LEN, (short)1, 1L);
+      List<DataNode> datanodes = cluster.getDataNodes();
+      assertEquals(datanodes.size(), 1);
+      DataNode datanode = datanodes.get(0);
+      DataNodeMetrics metrics = datanode.getMetrics();
+      assertEquals(LONG_FILE_LEN, metrics.bytesWritten.getCurrentIntervalValue());
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+}
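Note on the file length used above: Integer.MAX_VALUE + 1L is presumably chosen so the bytesWritten metric is exercised past the 32-bit range, which the simulated dataset makes cheap to do. A minimal, self-contained sketch of the overflow this guards against (the class name is illustrative, not part of the patch):

    public class IntOverflowSketch {
      public static void main(String[] args) {
        long len = Integer.MAX_VALUE + 1L;   // 2147483648, the file length used in the test
        System.out.println(len);             // 2147483648
        System.out.println((int) len);       // -2147483648: a 32-bit counter would wrap
      }
    }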

+ 361 - 0
src/test/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java

@@ -0,0 +1,361 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.nio.channels.FileChannel;
+import java.util.Random;
+import java.util.Map.Entry;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
+
+import junit.framework.TestCase;
+
+/**
+ * Tests {@link DirectoryScanner} handling of differences
+ * between blocks on the disk and blocks in memory.
+ */
+public class TestDirectoryScanner extends TestCase {
+  private static final Log LOG = LogFactory.getLog(TestDirectoryScanner.class);
+  private static final Configuration CONF = new Configuration();
+  private static final int DEFAULT_GEN_STAMP = 9999;
+
+  private MiniDFSCluster cluster;
+  private FSDataset fds = null;
+  private DirectoryScanner scanner = null;
+  private Random rand = new Random();
+  private Random r = new Random();
+
+  static {
+    CONF.setLong("dfs.block.size", 100);
+    CONF.setInt("io.bytes.per.checksum", 1);
+    CONF.setLong("dfs.heartbeat.interval", 1L);
+  }
+
+  /** create a file with a length of <code>fileLen</code> */
+  private void createFile(String fileName, long fileLen) throws IOException {
+    FileSystem fs = cluster.getFileSystem();
+    Path filePath = new Path(fileName);
+    DFSTestUtil.createFile(fs, filePath, fileLen, (short) 1, r.nextLong());
+  }
+
+  /** Truncate a block file */
+  private long truncateBlockFile() throws IOException {
+    synchronized (fds) {
+      for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
+        Block b = entry.getKey();
+        File f = entry.getValue().getFile();
+        File mf = FSDataset.getMetaFile(f, b);
+        // Truncate a block file that has a corresponding metadata file
+        if (f.exists() && f.length() != 0 && mf.exists()) {
+          FileOutputStream s = new FileOutputStream(f);
+          FileChannel channel = s.getChannel();
+          channel.truncate(0);
+          LOG.info("Truncated block file " + f.getAbsolutePath());
+          return entry.getKey().getBlockId();
+        }
+      }
+    }
+    return 0;
+  }
+
+  /** Delete a block file */
+  private long deleteBlockFile() {
+    synchronized(fds) {
+      for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
+        Block b = entry.getKey();
+        File f = entry.getValue().getFile();
+        File mf = FSDataset.getMetaFile(f, b);
+        // Delete a block file that has a corresponding metadata file
+        if (f.exists() && mf.exists() && f.delete()) {
+          LOG.info("Deleting block file " + f.getAbsolutePath());
+          return entry.getKey().getBlockId();
+        }
+      }
+    }
+    return 0;
+  }
+
+  /** Delete block meta file */
+  private long deleteMetaFile() {
+    synchronized(fds) {
+      for (Entry<Block, DatanodeBlockInfo> entry : fds.volumeMap.entrySet()) {
+        Block b = entry.getKey();
+        String blkfile = entry.getValue().getFile().getAbsolutePath();
+        long genStamp = b.getGenerationStamp();
+        String metafile = FSDataset.getMetaFileName(blkfile, genStamp);
+        File file = new File(metafile);
+        // Delete a metadata file
+        if (file.exists() && file.delete()) {
+          LOG.info("Deleting metadata file " + file.getAbsolutePath());
+          return entry.getKey().getBlockId();
+        }
+      }
+    }
+    return 0;
+  }
+
+  /** Get a random blockId that is not used already */
+  private long getFreeBlockId() {
+    long id = rand.nextLong();
+    while (true) {
+      id = rand.nextLong();
+      Block b = new Block(id);
+      DatanodeBlockInfo info = null;
+      synchronized(fds) {
+        info = fds.volumeMap.get(b);
+      }
+      if (info == null) {
+        break;
+      }
+    }
+    return id;
+  }
+
+  private String getBlockFile(long id) {
+    return Block.BLOCK_FILE_PREFIX + id;
+  }
+
+  private String getMetaFile(long id) {
+    return Block.BLOCK_FILE_PREFIX + id + "_" + DEFAULT_GEN_STAMP
+        + Block.METADATA_EXTENSION;
+  }
+
+  /** Create a block file in a random volume*/
+  private long createBlockFile() throws IOException {
+    FSVolume[] volumes = fds.volumes.volumes;
+    int index = rand.nextInt(volumes.length - 1);
+    long id = getFreeBlockId();
+    File file = new File(volumes[index].getDir().getPath(), getBlockFile(id));
+    if (file.createNewFile()) {
+      LOG.info("Created block file " + file.getName());
+    }
+    return id;
+  }
+
+  /** Create a metafile in a random volume*/
+  private long createMetaFile() throws IOException {
+    FSVolume[] volumes = fds.volumes.volumes;
+    int index = rand.nextInt(volumes.length - 1);
+    long id = getFreeBlockId();
+    File file = new File(volumes[index].getDir().getPath(), getMetaFile(id));
+    if (file.createNewFile()) {
+      LOG.info("Created metafile " + file.getName());
+    }
+    return id;
+  }
+
+  /** Create a block file and corresponding metafile in a random volume */
+  private long createBlockMetaFile() throws IOException {
+    FSVolume[] volumes = fds.volumes.volumes;
+    int index = rand.nextInt(volumes.length - 1);
+    long id = getFreeBlockId();
+    File file = new File(volumes[index].getDir().getPath(), getBlockFile(id));
+    if (file.createNewFile()) {
+      LOG.info("Created block file " + file.getName());
+
+      // Create files with the same prefix as the block file but with extensions
+      // such that, when sorted, they appear around the meta file, to test how
+      // DirectoryScanner handles extraneous files
+      String name1 = file.getAbsolutePath() + ".l";
+      String name2 = file.getAbsolutePath() + ".n";
+      file = new File(name1);
+      if (file.createNewFile()) {
+        LOG.info("Created extraneous file " + name1);
+      }
+
+      file = new File(name2);
+      if (file.createNewFile()) {
+        LOG.info("Created extraneous file " + name2);
+      }
+
+      file = new File(volumes[index].getDir().getPath(), getMetaFile(id));
+      if (file.createNewFile()) {
+        LOG.info("Created metafile " + file.getName());
+      }
+    }
+    return id;
+  }
+
+  private void scan(long totalBlocks, int diffsize, long missingMetaFile, long missingBlockFile,
+      long missingMemoryBlocks, long mismatchBlocks) {
+    scanner.reconcile();
+    assertEquals(totalBlocks, scanner.totalBlocks);
+    assertEquals(diffsize, scanner.diff.size());
+    assertEquals(missingMetaFile, scanner.missingMetaFile);
+    assertEquals(missingBlockFile, scanner.missingBlockFile);
+    assertEquals(missingMemoryBlocks, scanner.missingMemoryBlocks);
+    assertEquals(mismatchBlocks, scanner.mismatchBlocks);
+  }
+
+  public void test() throws Exception {
+    cluster = new MiniDFSCluster(CONF, 1, true, null);
+    try {
+      cluster.waitActive();
+      fds = (FSDataset) cluster.getDataNodes().get(0).getFSDataset();
+      scanner = new DirectoryScanner(fds, CONF);
+
+      // Add a file with 100 blocks (10000 bytes at a 100-byte block size)
+      createFile("/tmp/t1", 10000);
+      long totalBlocks = 100;
+
+      // Test1: No difference between in-memory and disk
+      scan(100, 0, 0, 0, 0, 0);
+
+      // Test2: block metafile is missing
+      long blockId = deleteMetaFile();
+      scan(totalBlocks, 1, 1, 0, 0, 1);
+      verifyGenStamp(blockId, Block.GRANDFATHER_GENERATION_STAMP);
+      scan(totalBlocks, 0, 0, 0, 0, 0);
+
+      // Test3: block file is missing
+      blockId = deleteBlockFile();
+      scan(totalBlocks, 1, 0, 1, 0, 0);
+      totalBlocks--;
+      verifyDeletion(blockId);
+      scan(totalBlocks, 0, 0, 0, 0, 0);
+
+      // Test4: A block file exists for which there is no metafile and
+      // no block in memory
+      blockId = createBlockFile();
+      totalBlocks++;
+      scan(totalBlocks, 1, 1, 0, 1, 0);
+      verifyAddition(blockId, Block.GRANDFATHER_GENERATION_STAMP, 0);
+      scan(totalBlocks, 0, 0, 0, 0, 0);
+
+      // Test5: A metafile exists for which there is no block file and
+      // no block in memory
+      blockId = createMetaFile();
+      scan(totalBlocks+1, 1, 0, 1, 1, 0);
+      File metafile = new File(getMetaFile(blockId));
+      assertTrue(!metafile.exists());
+      scan(totalBlocks, 0, 0, 0, 0, 0);
+
+      // Test6: A block file and metafile exists for which there is no block in
+      // memory
+      blockId = createBlockMetaFile();
+      totalBlocks++;
+      scan(totalBlocks, 1, 0, 0, 1, 0);
+      verifyAddition(blockId, DEFAULT_GEN_STAMP, 0);
+      scan(totalBlocks, 0, 0, 0, 0, 0);
+
+      // Test7: Delete bunch of metafiles
+      for (int i = 0; i < 10; i++) {
+        blockId = deleteMetaFile();
+      }
+      scan(totalBlocks, 10, 10, 0, 0, 10);
+      scan(totalBlocks, 0, 0, 0, 0, 0);
+
+      // Test8: Delete bunch of block files
+      for (int i = 0; i < 10; i++) {
+        blockId = deleteBlockFile();
+      }
+      scan(totalBlocks, 10, 0, 10, 0, 0);
+      totalBlocks -= 10;
+      scan(totalBlocks, 0, 0, 0, 0, 0);
+
+      // Test9: create a bunch of block files
+      for (int i = 0; i < 10 ; i++) {
+        blockId = createBlockFile();
+      }
+      totalBlocks += 10;
+      scan(totalBlocks, 10, 10, 0, 10, 0);
+      scan(totalBlocks, 0, 0, 0, 0, 0);
+
+      // Test10: create a bunch of metafiles
+      for (int i = 0; i < 10 ; i++) {
+        blockId = createMetaFile();
+      }
+      scan(totalBlocks+10, 10, 0, 10, 10, 0);
+      scan(totalBlocks, 0, 0, 0, 0, 0);
+
+      // Test11: create a bunch of block files and meta files
+      for (int i = 0; i < 10 ; i++) {
+        blockId = createBlockMetaFile();
+      }
+      totalBlocks += 10;
+      scan(totalBlocks, 10, 0, 0, 10, 0);
+      scan(totalBlocks, 0, 0, 0, 0, 0);
+
+      // Test12: truncate block files to test block length mismatch
+      for (int i = 0; i < 10 ; i++) {
+        truncateBlockFile();
+      }
+      scan(totalBlocks, 10, 0, 0, 0, 10);
+      scan(totalBlocks, 0, 0, 0, 0, 0);
+
+      // Test13: all the conditions combined
+      createMetaFile();
+      createBlockFile();
+      createBlockMetaFile();
+      deleteMetaFile();
+      deleteBlockFile();
+      truncateBlockFile();
+      scan(totalBlocks+3, 6, 2, 2, 3, 2);
+      scan(totalBlocks+1, 0, 0, 0, 0, 0);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
+  private void verifyAddition(long blockId, long genStamp, long size) {
+    Block memBlock = fds.getBlockKey(blockId);
+    assertNotNull(memBlock);
+    DatanodeBlockInfo blockInfo;
+    synchronized(fds) {
+      blockInfo = fds.volumeMap.get(memBlock);
+    }
+    assertNotNull(blockInfo);
+
+    // Added block has the same file as the one created by the test
+    File file = new File(getBlockFile(blockId));
+    assertEquals(file.getName(), blockInfo.getFile().getName());
+
+    // Generation stamp is same as that of created file
+    assertEquals(genStamp, memBlock.getGenerationStamp());
+
+    // File size matches
+    assertEquals(size, memBlock.getNumBytes());
+  }
+
+  private void verifyDeletion(long blockId) {
+    // Ensure block does not exist in memory
+    synchronized(fds) {
+      assertEquals(null, fds.volumeMap.get(new Block(blockId)));
+    }
+  }
+
+  private void verifyGenStamp(long blockId, long genStamp) {
+    Block memBlock;
+    synchronized(fds) {
+      memBlock = fds.getBlockKey(blockId);
+    }
+    assertNotNull(memBlock);
+    assertEquals(genStamp, memBlock.getGenerationStamp());
+  }
+}

+ 153 - 0
src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java

@@ -0,0 +1,153 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.DataOutputStream;
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.AccessToken;
+
+import junit.framework.TestCase;
+
+/** Test if a datanode can correctly handle errors during block read/write*/
+public class TestDiskError extends TestCase {
+  public void testShutdown() throws Exception {
+    if (System.getProperty("os.name").startsWith("Windows")) {
+      /**
+       * This test depends on the OS not allowing file creation in a directory
+       * that the user has no write permission for. Apparently that is not
+       * the case on Windows (at least under Cygwin), and possibly on AIX,
+       * so the test is disabled on Windows.
+       */
+      return;
+    }
+    // bring up a cluster of 3
+    Configuration conf = new Configuration();
+    conf.setLong("dfs.block.size", 512L);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    cluster.waitActive();
+    FileSystem fs = cluster.getFileSystem();
+    final int dnIndex = 0;
+    String dataDir = cluster.getDataDirectory();
+    File dir1 = new File(new File(dataDir, "data"+(2*dnIndex+1)), "tmp");
+    File dir2 = new File(new File(dataDir, "data"+(2*dnIndex+2)), "tmp");
+    try {
+      // make the data directories of the first datanode read-only
+      assertTrue(dir1.setReadOnly());
+      assertTrue(dir2.setReadOnly());
+
+      // keep creating files until the first datanode goes down
+      DataNode dn = cluster.getDataNodes().get(dnIndex);
+      for (int i=0; DataNode.isDatanodeUp(dn); i++) {
+        Path fileName = new Path("/test.txt"+i);
+        DFSTestUtil.createFile(fs, fileName, 1024, (short)2, 1L);
+        DFSTestUtil.waitReplication(fs, fileName, (short)2);
+        fs.delete(fileName, true);
+      }
+    } finally {
+      // restore its old permission
+      dir1.setWritable(true);
+      dir2.setWritable(true);
+      cluster.shutdown();
+    }
+  }
+  
+  public void testReplicationError() throws Exception {
+    // bring up a cluster of 1
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    cluster.waitActive();
+    FileSystem fs = cluster.getFileSystem();
+    
+    try {
+      // create a file of replication factor of 1
+      final Path fileName = new Path("/test.txt");
+      final int fileLen = 1;
+      DFSTestUtil.createFile(fs, fileName, 1, (short)1, 1L);
+      DFSTestUtil.waitReplication(fs, fileName, (short)1);
+
+      // get the block belonging to the created file
+      LocatedBlocks blocks = cluster.getNamesystem().getBlockLocations(
+          fileName.toString(), 0, (long)fileLen);
+      assertEquals(blocks.locatedBlockCount(), 1);
+      LocatedBlock block = blocks.get(0);
+      
+      // bring up a second datanode
+      cluster.startDataNodes(conf, 1, true, null, null);
+      cluster.waitActive();
+      final int sndNode = 1;
+      DataNode datanode = cluster.getDataNodes().get(sndNode);
+      
+      // replicate the block to the second datanode
+      InetSocketAddress target = datanode.getSelfAddr();
+      Socket s = new Socket(target.getAddress(), target.getPort());
+        //write the header.
+      DataOutputStream out = new DataOutputStream(
+          s.getOutputStream());
+
+      out.writeShort( DataTransferProtocol.DATA_TRANSFER_VERSION );
+      out.write( DataTransferProtocol.OP_WRITE_BLOCK );
+      out.writeLong( block.getBlock().getBlockId());
+      out.writeLong( block.getBlock().getGenerationStamp() );
+      out.writeInt(1);                 // pipeline size
+      out.writeBoolean( false );       // recovery flag
+      Text.writeString( out, "" );     // client name
+      out.writeBoolean(false);         // not sending source node information
+      out.writeInt(0);                 // number of downstream targets
+      AccessToken.DUMMY_TOKEN.write(out);
+      
+      // write the checksum header: checksum type and bytes per checksum
+      out.writeByte( 1 );              // checksum type: CRC32
+      out.writeInt( 512 );             // bytes per checksum
+
+      out.flush();
+
+      // close the connection before sending the content of the block
+      out.close();
+      
+      // the temporary block & meta files should be deleted
+      String dataDir = cluster.getDataDirectory();
+      File dir1 = new File(new File(dataDir, "data"+(2*sndNode+1)), "tmp");
+      File dir2 = new File(new File(dataDir, "data"+(2*sndNode+2)), "tmp");
+      while (dir1.listFiles().length != 0 || dir2.listFiles().length != 0) {
+        Thread.sleep(100);
+      }
+      
+      // then increase the file's replication factor
+      fs.setReplication(fileName, (short)2);
+      // replication should succeed
+      DFSTestUtil.waitReplication(fs, fileName, (short)1);
+      
+      // clean up the file
+      fs.delete(fileName, false);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}

+ 114 - 0
src/test/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java

@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.*;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.datanode.DataBlockScanner;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+
+/**
+ * This tests InterDatanodeProtocol for block handling. 
+ */
+public class TestInterDatanodeProtocol extends junit.framework.TestCase {
+  public static void checkMetaInfo(Block b, InterDatanodeProtocol idp,
+      DataBlockScanner scanner) throws IOException {
+    BlockMetaDataInfo metainfo = idp.getBlockMetaDataInfo(b);
+    assertEquals(b.getBlockId(), metainfo.getBlockId());
+    assertEquals(b.getNumBytes(), metainfo.getNumBytes());
+    if (scanner != null) {
+      assertEquals(scanner.getLastScanTime(b),
+          metainfo.getLastScanTime());
+    }
+  }
+
+  public static LocatedBlock getLastLocatedBlock(
+      ClientProtocol namenode, String src
+  ) throws IOException {
+    //get block info for the last block
+    LocatedBlocks locations = namenode.getBlockLocations(src, 0, Long.MAX_VALUE);
+    List<LocatedBlock> blocks = locations.getLocatedBlocks();
+    DataNode.LOG.info("blocks.size()=" + blocks.size());
+    assertTrue(blocks.size() > 0);
+
+    return blocks.get(blocks.size() - 1);
+  }
+
+  /**
+   * The following test first creates a file.
+   * It verifies the block information from a datanode.
+   * Then, it updates the block with new information and verifies again. 
+   */
+  public void testBlockMetaDataInfo() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+
+    try {
+      cluster = new MiniDFSCluster(conf, 3, true, null);
+      cluster.waitActive();
+
+      //create a file
+      DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
+      String filestr = "/foo";
+      Path filepath = new Path(filestr);
+      DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
+      assertTrue(dfs.getClient().exists(filestr));
+
+      //get block info
+      LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().namenode, filestr);
+      DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
+      assertTrue(datanodeinfo.length > 0);
+
+      //connect to a data node
+      InterDatanodeProtocol idp = DataNode.createInterDataNodeProtocolProxy(
+          datanodeinfo[0], conf);
+      DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
+      assertTrue(datanode != null);
+      
+      // stop the block scanner, so we can compare lastScanTime
+      datanode.blockScannerThread.interrupt();
+
+      //verify BlockMetaDataInfo
+      Block b = locatedblock.getBlock();
+      InterDatanodeProtocol.LOG.info("b=" + b + ", " + b.getClass());
+      checkMetaInfo(b, idp, datanode.blockScanner);
+
+      //verify updateBlock
+      Block newblock = new Block(
+          b.getBlockId(), b.getNumBytes()/2, b.getGenerationStamp()+1);
+      idp.updateBlock(b, newblock, false);
+      checkMetaInfo(newblock, idp, datanode.blockScanner);
+    }
+    finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+}

+ 294 - 0
src/test/org/apache/hadoop/hdfs/server/datanode/TestSimulatedFSDataset.java

@@ -0,0 +1,294 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.DataInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.util.DataChecksum;
+
+/**
+ * This class tests the methods of SimulatedFSDataset.
+ *
+ */
+
+public class TestSimulatedFSDataset extends TestCase {
+  
+  Configuration conf = null;
+  
+
+  
+  static final int NUMBLOCKS = 20;
+  static final int BLOCK_LENGTH_MULTIPLIER = 79;
+
+  protected void setUp() throws Exception {
+    super.setUp();
+    conf = new Configuration();
+    conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+  }
+
+  protected void tearDown() throws Exception {
+    super.tearDown();
+  }
+  
+  long blockIdToLen(long blkid) {
+    return blkid*BLOCK_LENGTH_MULTIPLIER;
+  }
+  
+  int addSomeBlocks(FSDatasetInterface fsdataset, int startingBlockId) throws IOException {
+    int bytesAdded = 0;
+    for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
+      Block b = new Block(i, 0, 0); // we pass the expected length as zero; fsdataset should use the size of the actual data written
+      OutputStream dataOut  = fsdataset.writeToBlock(b, false).dataOut;
+      assertEquals(0, fsdataset.getLength(b));
+      for (int j=1; j <= blockIdToLen(i); ++j) {
+        dataOut.write(j);
+        assertEquals(j, fsdataset.getLength(b)); // correct length even as we write
+        bytesAdded++;
+      }
+      dataOut.close();
+      b.setNumBytes(blockIdToLen(i));
+      fsdataset.finalizeBlock(b);
+      assertEquals(blockIdToLen(i), fsdataset.getLength(b));
+    }
+    return bytesAdded;  
+  }
+  int addSomeBlocks(FSDatasetInterface fsdataset ) throws IOException {
+    return addSomeBlocks(fsdataset, 1);
+  }
+
+  public void testGetMetaData() throws IOException {
+    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    Block b = new Block(1, 5, 0);
+    try {
+      assertFalse(fsdataset.metaFileExists(b));
+      assertTrue("Expected an IO exception", false);
+    } catch (IOException e) {
+      // ok - as expected
+    }
+    addSomeBlocks(fsdataset); // Only need to add one but ....
+    b = new Block(1, 0, 0);
+    InputStream metaInput = fsdataset.getMetaDataInputStream(b);
+    DataInputStream metaDataInput = new DataInputStream(metaInput);
+    short version = metaDataInput.readShort();
+    assertEquals(FSDataset.METADATA_VERSION, version);
+    DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
+    assertEquals(DataChecksum.CHECKSUM_NULL, checksum.getChecksumType());
+    assertEquals(0, checksum.getChecksumSize());  
+  }
+
+
+  public void testStorageUsage() throws IOException {
+    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    assertEquals(fsdataset.getDfsUsed(), 0);
+    assertEquals(fsdataset.getRemaining(), fsdataset.getCapacity());
+    int bytesAdded = addSomeBlocks(fsdataset);
+    assertEquals(bytesAdded, fsdataset.getDfsUsed());
+    assertEquals(fsdataset.getCapacity()-bytesAdded,  fsdataset.getRemaining());
+    
+  }
+
+
+
+  void  checkBlockDataAndSize(FSDatasetInterface fsdataset, 
+              Block b, long expectedLen) throws IOException { 
+    InputStream input = fsdataset.getBlockInputStream(b);
+    long lengthRead = 0;
+    int data;
+    while ((data = input.read()) != -1) {
+      assertEquals(SimulatedFSDataset.DEFAULT_DATABYTE, data);
+      lengthRead++;
+    }
+    assertEquals(expectedLen, lengthRead);
+  }
+  
+  public void testWriteRead() throws IOException {
+    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    addSomeBlocks(fsdataset);
+    for (int i=1; i <= NUMBLOCKS; ++i) {
+      Block b = new Block(i, 0, 0);
+      assertTrue(fsdataset.isValidBlock(b));
+      assertEquals(blockIdToLen(i), fsdataset.getLength(b));
+      checkBlockDataAndSize(fsdataset, b, blockIdToLen(i));
+    }
+  }
+
+
+
+  public void testGetBlockReport() throws IOException {
+    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    Block[] blockReport = fsdataset.getBlockReport();
+    assertEquals(0, blockReport.length);
+    int bytesAdded = addSomeBlocks(fsdataset);
+    blockReport = fsdataset.getBlockReport();
+    assertEquals(NUMBLOCKS, blockReport.length);
+    for (Block b: blockReport) {
+      assertNotNull(b);
+      assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
+    }
+  }
+  public void testInjectionEmpty() throws IOException {
+    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    Block[] blockReport = fsdataset.getBlockReport();
+    assertEquals(0, blockReport.length);
+    int bytesAdded = addSomeBlocks(fsdataset);
+    blockReport = fsdataset.getBlockReport();
+    assertEquals(NUMBLOCKS, blockReport.length);
+    for (Block b: blockReport) {
+      assertNotNull(b);
+      assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
+    }
+    
+    // Inject blocks into an empty fsdataset
+    //  - injecting the blocks we got above.
+  
+   
+    SimulatedFSDataset sfsdataset = new SimulatedFSDataset(conf);
+    sfsdataset.injectBlocks(blockReport);
+    blockReport = sfsdataset.getBlockReport();
+    assertEquals(NUMBLOCKS, blockReport.length);
+    for (Block b: blockReport) {
+      assertNotNull(b);
+      assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
+      assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(b));
+    }
+    assertEquals(bytesAdded, sfsdataset.getDfsUsed());
+    assertEquals(sfsdataset.getCapacity()-bytesAdded, sfsdataset.getRemaining());
+  }
+
+  public void testInjectionNonEmpty() throws IOException {
+    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    
+    Block[] blockReport = fsdataset.getBlockReport();
+    assertEquals(0, blockReport.length);
+    int bytesAdded = addSomeBlocks(fsdataset);
+    blockReport = fsdataset.getBlockReport();
+    assertEquals(NUMBLOCKS, blockReport.length);
+    for (Block b: blockReport) {
+      assertNotNull(b);
+      assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
+    }
+    fsdataset = null;
+    
+    // Inject blocks into a non-empty fsdataset
+    //  - injecting the blocks we got above.
+  
+   
+    SimulatedFSDataset sfsdataset = new SimulatedFSDataset(conf);
+    // Add some blocks whose block ids do not conflict with
+    // the ones we are going to inject.
+    bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS+1);
+    Block[] blockReport2 = sfsdataset.getBlockReport();
+    assertEquals(NUMBLOCKS, blockReport.length);
+    blockReport2 = sfsdataset.getBlockReport();
+    assertEquals(NUMBLOCKS, blockReport.length);
+    sfsdataset.injectBlocks(blockReport);
+    blockReport = sfsdataset.getBlockReport();
+    assertEquals(NUMBLOCKS*2, blockReport.length);
+    for (Block b: blockReport) {
+      assertNotNull(b);
+      assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
+      assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(b));
+    }
+    assertEquals(bytesAdded, sfsdataset.getDfsUsed());
+    assertEquals(sfsdataset.getCapacity()-bytesAdded,  sfsdataset.getRemaining());
+    
+    
+    // Now test that the dataset cannot be created if it does not have sufficient capacity
+
+    conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);
+ 
+    try {
+      sfsdataset = new SimulatedFSDataset(conf);
+      sfsdataset.injectBlocks(blockReport);
+      assertTrue("Expected an IO exception", false);
+    } catch (IOException e) {
+      // ok - as expected
+    }
+
+  }
+
+  public void checkInvalidBlock(Block b) throws IOException {
+    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    assertFalse(fsdataset.isValidBlock(b));
+    try {
+      fsdataset.getLength(b);
+      assertTrue("Expected an IO exception", false);
+    } catch (IOException e) {
+      // ok - as expected
+    }
+    
+    try {
+      fsdataset.getBlockInputStream(b);
+      assertTrue("Expected an IO exception", false);
+    } catch (IOException e) {
+      // ok - as expected
+    }
+    
+    try {
+      fsdataset.finalizeBlock(b);
+      assertTrue("Expected an IO exception", false);
+    } catch (IOException e) {
+      // ok - as expected
+    }
+    
+  }
+  
+  public void testInValidBlocks() throws IOException {
+    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    Block b = new Block(1, 5, 0);
+    checkInvalidBlock(b);
+    
+    // Now check an invalid block after adding some blocks
+    addSomeBlocks(fsdataset);
+    b = new Block(NUMBLOCKS + 99, 5, 0);
+    checkInvalidBlock(b);
+    
+  }
+
+  public void testInvalidate() throws IOException {
+    FSDatasetInterface fsdataset = new SimulatedFSDataset(conf); 
+    int bytesAdded = addSomeBlocks(fsdataset);
+    Block[] deleteBlocks = new Block[2];
+    deleteBlocks[0] = new Block(1, 0, 0);
+    deleteBlocks[1] = new Block(2, 0, 0);
+    fsdataset.invalidate(deleteBlocks);
+    checkInvalidBlock(deleteBlocks[0]);
+    checkInvalidBlock(deleteBlocks[1]);
+    long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
+    assertEquals(bytesAdded-sizeDeleted, fsdataset.getDfsUsed());
+    assertEquals(fsdataset.getCapacity()-bytesAdded+sizeDeleted,  fsdataset.getRemaining());
+    
+    
+    
+    // Now make sure the rest of the blocks are valid
+    for (int i=3; i <= NUMBLOCKS; ++i) {
+      Block b = new Block(i, 0, 0);
+      assertTrue(fsdataset.isValidBlock(b));
+    }
+  }
+
+}

+ 210 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/CreateEditsLog.java

@@ -0,0 +1,210 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.BlocksMap.BlockInfo;
+
+/**
+ * 
+ * CreateEditsLog
+ *   Synopsis: CreateEditsLog -f numFiles StartingBlockId numBlocksPerFile
+ *        [-r replicafactor] [-d editsLogDirectory]
+ *             Default replication factor is 1
+ *             Default edits log directory is /tmp/EditsLogOut
+ *   
+ *   Create a name node's edits log in /tmp/EditsLogOut.
+ *   The file /tmp/EditsLogOut/current/edits can be copied to a name node's
+ *   dfs.name.dir/current directory and the name node can be started as usual.
+ *   
+ *   The files are created in /createdViaInjectingInEditsLog
+ *   The file names contain the starting and ending blockIds; hence one can 
+ *   create multiple edits logs with this command using non-overlapping 
+ *   block ids and feed the files to a single name node.
+ *   
+ *   See also {@link DataNodeCluster} for injecting a set of matching
+ *   blocks created with this command into a set of simulated data nodes.
+ *
+ */
+
+public class CreateEditsLog {
+  static final String BASE_PATH = "/createdViaInjectingInEditsLog";
+  static final String EDITS_DIR = "/tmp/EditsLogOut";
+  static String edits_dir = EDITS_DIR;
+  static final public long BLOCK_GENERATION_STAMP =
+    GenerationStamp.FIRST_VALID_STAMP;
+  
+  static void addFiles(FSEditLog editLog, int numFiles, short replication, 
+                         int blocksPerFile, long startingBlockId,
+                         FileNameGenerator nameGenerator) {
+    
+    PermissionStatus p = new PermissionStatus("joeDoe", "people",
+                                      new FsPermission((short)0777));
+    INodeDirectory dirInode = new INodeDirectory(p, 0L);
+    editLog.logMkDir(BASE_PATH, dirInode);
+    long blockSize = 10;
+    BlockInfo[] blocks = new BlockInfo[blocksPerFile];
+    for (int iB = 0; iB < blocksPerFile; ++iB) {
+      blocks[iB] = 
+       new BlockInfo(new Block(0, blockSize, BLOCK_GENERATION_STAMP),
+                               replication);
+    }
+    
+    long currentBlockId = startingBlockId;
+    long bidAtSync = startingBlockId;
+
+    for (int iF = 0; iF < numFiles; iF++) {
+      for (int iB = 0; iB < blocksPerFile; ++iB) {
+         blocks[iB].setBlockId(currentBlockId++);
+      }
+
+      try {
+
+        INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
+                      null, replication, 0, blockSize, blocks, p, "", "", null);
+        // Append path to filename with information about blockIDs 
+        String path = "_" + iF + "_B" + blocks[0].getBlockId() + 
+                      "_to_B" + blocks[blocksPerFile-1].getBlockId() + "_";
+        String filePath = nameGenerator.getNextFileName("");
+        filePath = filePath + path;
+        // Log the new sub directory in edits
+        if ((iF % nameGenerator.getFilesPerDirectory())  == 0) {
+          String currentDir = nameGenerator.getCurrentDir();
+          dirInode = new INodeDirectory(p, 0L);
+          editLog.logMkDir(currentDir, dirInode);
+        }
+        editLog.logOpenFile(filePath, inode);
+        editLog.logCloseFile(filePath, inode);
+
+        if (currentBlockId - bidAtSync >= 2000) { // sync every 2K blocks
+          editLog.logSync();
+          bidAtSync = currentBlockId;
+        }
+      } catch (IOException e) {
+        System.out.println("Creating trascation for file " + iF +
+            " encountered exception " + e);
+      }
+    }
+    System.out.println("Created edits log in directory " + edits_dir);
+    System.out.println(" containing " +
+       numFiles + " File-Creates, each file with " + blocksPerFile + " blocks");
+    System.out.println(" blocks range: " + 
+        startingBlockId + " to " + (currentBlockId-1));
+  }
+  
+  static String usage = "Usage: createditlogs " +
+    " -f  numFiles startingBlockIds NumBlocksPerFile  [-r replicafactor] " + 
+    "[-d editsLogDirectory]\n" + 
+    "      Default replication factor is 1\n" +
+    "      Default edits log directory is " + EDITS_DIR + "\n";
+
+
+
+  static void printUsageExit() {
+    System.out.println(usage);
+    System.exit(-1); 
+  }
+
+  static void printUsageExit(String err) {
+    System.out.println(err);
+    printUsageExit();
+  }
+  /**
+   * @param args
+   * @throws IOException 
+   */
+  public static void main(String[] args) throws IOException {
+
+
+
+    long startingBlockId = 1;
+    int numFiles = 0;
+    short replication = 1;
+    int numBlocksPerFile = 0;
+
+    if (args.length == 0) {
+      printUsageExit();
+    }
+
+    for (int i = 0; i < args.length; i++) { // parse command line
+      if (args[i].equals("-h"))
+        printUsageExit();
+      if (args[i].equals("-f")) {
+       if (i + 3 >= args.length || args[i+1].startsWith("-") || 
+           args[i+2].startsWith("-") || args[i+3].startsWith("-")) {
+         printUsageExit(
+             "Missing num files, starting block and/or number of blocks");
+       }
+       numFiles = Integer.parseInt(args[++i]);
+       startingBlockId = Integer.parseInt(args[++i]);
+       numBlocksPerFile = Integer.parseInt(args[++i]);
+       if (numFiles <=0 || numBlocksPerFile <= 0) {
+         printUsageExit("numFiles and numBlocksPerFile most be greater than 0");
+       }
+      } else if (args[i].equals("-r") || args[i+1].startsWith("-")) {
+        if (i + 1 >= args.length) {
+          printUsageExit(
+              "Missing num files, starting block and/or number of blocks");
+        }
+        replication = Short.parseShort(args[++i]);
+      } else if (args[i].equals("-d")) {
+        if (i + 1 >= args.length || args[i+1].startsWith("-")) {
+          printUsageExit("Missing edits logs directory");
+        }
+        edits_dir = args[++i];
+      } else {
+        printUsageExit();
+      }
+    }
+    
+
+    File editsLogDir = new File(edits_dir);
+    File subStructureDir = new File(edits_dir + "/" + 
+        Storage.STORAGE_DIR_CURRENT);
+    if ( !editsLogDir.exists() ) {
+      if ( !editsLogDir.mkdir()) {
+        System.out.println("cannot create " + edits_dir);
+        System.exit(-1);
+      }
+    }
+    if ( !subStructureDir.exists() ) {
+      if ( !subStructureDir.mkdir()) {
+        System.out.println("cannot create subdirs of " + edits_dir);
+        System.exit(-1);
+      }
+    }
+  
+    FSImage fsImage = new FSImage(new File(edits_dir));
+    FileNameGenerator nameGenerator = new FileNameGenerator(BASE_PATH, 100);
+
+
+    FSEditLog editLog = fsImage.getEditLog();
+    editLog.createEditLogFile(fsImage.getFsEditName());
+    editLog.open();
+    addFiles(editLog, numFiles, replication, numBlocksPerFile, startingBlockId,
+             nameGenerator);
+    editLog.logSync();
+    editLog.close();
+  }
+}
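A hypothetical invocation matching the synopsis above (the class name and numbers are illustrative, not from the patch): create 1,000 files of 2 blocks each, block IDs starting at 1, replication factor 3, with the edits written under /tmp/EditsLogOut.

    public class CreateEditsLogExample {
      public static void main(String[] args) throws Exception {
        // Equivalent to: CreateEditsLog -f 1000 1 2 -r 3 -d /tmp/EditsLogOut
        org.apache.hadoop.hdfs.server.namenode.CreateEditsLog.main(new String[] {
            "-f", "1000", "1", "2", "-r", "3", "-d", "/tmp/EditsLogOut"});
      }
    }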

+ 93 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/FileNameGenerator.java

@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.Arrays;
+
+/**
+ * File name generator.
+ * 
+ * Each directory contains not more than a fixed number (filesPerDir) 
+ * of files and directories.
+ * When the number of files in one directory reaches the maximum,
+ * the generator creates a new directory and continues generating files in it.
+ * The generated namespace tree is balanced, that is, the length of any path
+ * to a leaf file is not less than the height of the tree minus one.
+ */
+public class FileNameGenerator {
+  private static final int DEFAULT_FILES_PER_DIRECTORY = 32;
+  
+  private int[] pathIndecies = new int[20]; // this will support up to 32**20 = 2**100 = 10**30 files
+  private String baseDir;
+  private String currentDir;
+  private int filesPerDirectory;
+  private long fileCount;
+
+  FileNameGenerator(String baseDir) {
+    this(baseDir, DEFAULT_FILES_PER_DIRECTORY);
+  }
+  
+  FileNameGenerator(String baseDir, int filesPerDir) {
+    this.baseDir = baseDir;
+    this.filesPerDirectory = filesPerDir;
+    reset();
+  }
+
+  String getNextDirName(String prefix) {
+    int depth = 0;
+    while(pathIndecies[depth] >= 0)
+      depth++;
+    int level;
+    for(level = depth-1; 
+        level >= 0 && pathIndecies[level] == filesPerDirectory-1; level--)
+      pathIndecies[level] = 0;
+    if(level < 0)
+      pathIndecies[depth] = 0;
+    else
+      pathIndecies[level]++;
+    level = 0;
+    String next = baseDir;
+    while(pathIndecies[level] >= 0)
+      next = next + "/" + prefix + pathIndecies[level++];
+    return next; 
+  }
+
+  synchronized String getNextFileName(String fileNamePrefix) {
+    long fNum = fileCount % filesPerDirectory;
+    if(fNum == 0) {
+      currentDir = getNextDirName(fileNamePrefix + "Dir");
+    }
+    String fn = currentDir + "/" + fileNamePrefix + fileCount;
+    fileCount++;
+    return fn;
+  }
+
+  private synchronized void reset() {
+    Arrays.fill(pathIndecies, -1);
+    fileCount = 0L;
+    currentDir = "";
+  }
+
+  synchronized int getFilesPerDirectory() {
+    return filesPerDirectory;
+  }
+
+  synchronized String getCurrentDir() {
+    return currentDir;
+  }
+}
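A minimal usage sketch for the generator above (the class name is illustrative; since the constructor and getNextFileName are package-private, it assumes compilation into the same org.apache.hadoop.hdfs.server.namenode package). With two files per directory it emits paths such as /base/fDir0/f0, /base/fDir0/f1, /base/fDir1/f2, ... and descends a level once the top-level directories fill up.

    package org.apache.hadoop.hdfs.server.namenode;

    public class FileNameGeneratorSketch {
      public static void main(String[] args) {
        // illustrative only; two files per directory keeps the example short
        FileNameGenerator gen = new FileNameGenerator("/base", 2);
        for (int i = 0; i < 6; i++) {
          System.out.println(gen.getNextFileName("f"));
        }
      }
    }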

+ 1185 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java

@@ -0,0 +1,1185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.net.DNS;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.security.UnixUserGroupInformation;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.log4j.Level;
+
+/**
+ * Main class for a series of name-node benchmarks.
+ * 
+ * Each benchmark measures throughput and average execution time 
+ * of a specific name-node operation, e.g. file creation or block reports.
+ * 
+ * The benchmark does not involve any other Hadoop components
+ * except for the name-node. Each operation is executed
+ * by directly calling the respective name-node method.
+ * The name-node here is real; all other components are simulated.
+ * 
+ * Command line arguments for the benchmark include:<br>
+ * 1) total number of operations to be performed,<br>
+ * 2) number of threads to run these operations,<br>
+ * 3) followed by operation specific input parameters.
+ * 
+ * The benchmark generates inputs for each thread up front so that the
+ * input generation overhead does not affect the resulting statistics.
+ * The number of operations performed by the threads is practically the same;
+ * precisely, the numbers of operations performed by any two threads differ
+ * by at most 1 (for example, 10 operations over 3 threads are split 3, 3, 4).
+ * 
+ * Then the benchmark executes the specified number of operations using 
+ * the specified number of threads and outputs the resulting stats.
+ */
+public class NNThroughputBenchmark {
+  private static final Log LOG = LogFactory.getLog(NNThroughputBenchmark.class);
+  private static final int BLOCK_SIZE = 16;
+
+  static Configuration config;
+  static NameNode nameNode;
+
+  private final UserGroupInformation ugi;
+
+  NNThroughputBenchmark(Configuration conf) throws IOException, LoginException {
+    config = conf;
+    ugi = UnixUserGroupInformation.login(config);
+    UserGroupInformation.setCurrentUser(ugi);
+
+    // We do not need many handlers, since each thread simulates a handler
+    // by calling name-node methods directly
+    config.setInt("dfs.namenode.handler.count", 1);
+    // set exclude file
+    config.set("dfs.hosts.exclude", "${hadoop.tmp.dir}/dfs/hosts/exclude");
+    File excludeFile = new File(config.get("dfs.hosts.exclude", "exclude"));
+    if(! excludeFile.exists()) {
+      if(!excludeFile.getParentFile().mkdirs())
+        throw new IOException("NNThroughputBenchmark: cannot mkdir " + excludeFile);
+    }
+    new FileOutputStream(excludeFile).close();
+    // Start the NameNode
+    String[] argv = new String[] {};
+    nameNode = NameNode.createNameNode(argv, config);
+  }
+
+  void close() throws IOException {
+    nameNode.stop();
+  }
+
+  static void turnOffNameNodeLogging() {
+    // change log level to ERROR: NameNode.LOG & NameNode.stateChangeLog
+    ((Log4JLogger)NameNode.LOG).getLogger().setLevel(Level.ERROR);
+    ((Log4JLogger)NameNode.stateChangeLog).getLogger().setLevel(Level.ERROR);
+    ((Log4JLogger)NetworkTopology.LOG).getLogger().setLevel(Level.ERROR);
+    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.ERROR);
+    ((Log4JLogger)FSNamesystem.auditLog).getLogger().setLevel(Level.ERROR);
+    ((Log4JLogger)LeaseManager.LOG).getLogger().setLevel(Level.ERROR);
+  }
+
+  /**
+   * Base class for collecting operation statistics.
+   * 
+   * Overload this class in order to run statistics for a 
+   * specific name-node operation.
+   */
+  abstract class OperationStatsBase {
+    protected static final String BASE_DIR_NAME = "/nnThroughputBenchmark";
+    protected static final String OP_ALL_NAME = "all";
+    protected static final String OP_ALL_USAGE = "-op all " +
+                                  "<other ops options> [-keepResults]";
+
+    protected String baseDir;
+    protected short replication;
+    protected int  numThreads = 0;        // number of threads
+    protected int  numOpsRequired = 0;    // number of operations requested
+    protected int  numOpsExecuted = 0;    // number of operations executed
+    protected long cumulativeTime = 0;    // sum of times for each op
+    protected long elapsedTime = 0;       // time from start to finish
+    protected boolean keepResults = false;// don't clean base directory on exit
+
+    protected List<StatsDaemon> daemons;
+
+    /**
+     * Operation name.
+     */
+    abstract String getOpName();
+
+    /**
+     * Parse command line arguments.
+     * 
+     * @param args arguments
+     * @throws IOException
+     */
+    abstract void parseArguments(List<String> args) throws IOException;
+
+    /**
+     * Generate inputs for each daemon thread.
+     * 
+     * @param opsPerThread number of inputs for each thread.
+     * @throws IOException
+     */
+    abstract void generateInputs(int[] opsPerThread) throws IOException;
+
+    /**
+     * This corresponds to the arg1 argument of 
+     * {@link #executeOp(int, int, String)}, which can have different meanings
+     * depending on the operation performed.
+     * 
+     * @param daemonId
+     * @return the argument
+     */
+    abstract String getExecutionArgument(int daemonId);
+
+    /**
+     * Execute name-node operation.
+     * 
+     * @param daemonId id of the daemon calling this method.
+     * @param inputIdx serial index of the operation called by the daemon.
+     * @param arg1 operation specific argument.
+     * @return time of the individual name-node call.
+     * @throws IOException
+     */
+    abstract long executeOp(int daemonId, int inputIdx, String arg1) throws IOException;
+
+    /**
+     * Print the results of the benchmarking.
+     */
+    abstract void printResults();
+
+    OperationStatsBase() {
+      baseDir = BASE_DIR_NAME + "/" + getOpName();
+      replication = (short) config.getInt("dfs.replication", 3);
+      numOpsRequired = 10;
+      numThreads = 3;
+    }
+
+    void benchmark() throws IOException {
+      daemons = new ArrayList<StatsDaemon>();
+      long start = 0;
+      try {
+        numOpsExecuted = 0;
+        cumulativeTime = 0;
+        if(numThreads < 1)
+          return;
+        int tIdx = 0; // thread index < nrThreads
+        int opsPerThread[] = new int[numThreads];
+        for(int opsScheduled = 0; opsScheduled < numOpsRequired; 
+                                  opsScheduled += opsPerThread[tIdx++]) {
+          // execute  in a separate thread
+          opsPerThread[tIdx] = (numOpsRequired-opsScheduled)/(numThreads-tIdx);
+          if(opsPerThread[tIdx] == 0)
+            opsPerThread[tIdx] = 1;
+        }
+        // if numThreads > numOpsRequired then the remaining threads will do nothing
+        for(; tIdx < numThreads; tIdx++)
+          opsPerThread[tIdx] = 0;
+        turnOffNameNodeLogging();
+        generateInputs(opsPerThread);
+        for(tIdx=0; tIdx < numThreads; tIdx++)
+          daemons.add(new StatsDaemon(tIdx, opsPerThread[tIdx], this));
+        start = System.currentTimeMillis();
+        LOG.info("Starting " + numOpsRequired + " " + getOpName() + "(s).");
+        for(StatsDaemon d : daemons)
+          d.start();
+      } finally {
+        while(isInPorgress()) {
+          // try {Thread.sleep(500);} catch (InterruptedException e) {}
+        }
+        elapsedTime = System.currentTimeMillis() - start;
+        for(StatsDaemon d : daemons) {
+          incrementStats(d.localNumOpsExecuted, d.localCumulativeTime);
+          // System.out.println(d.toString() + ": ops Exec = " + d.localNumOpsExecuted);
+        }
+      }
+    }
+
+    private boolean isInProgress() {
+      for(StatsDaemon d : daemons)
+        if(d.isInProgress())
+          return true;
+      return false;
+    }
+
+    void cleanUp() throws IOException {
+      nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
+      if(!keepResults)
+        nameNode.delete(getBaseDir(), true);
+    }
+
+    int getNumOpsExecuted() {
+      return numOpsExecuted;
+    }
+
+    long getCumulativeTime() {
+      return cumulativeTime;
+    }
+
+    long getElapsedTime() {
+      return elapsedTime;
+    }
+
+    long getAverageTime() {
+      return numOpsExecuted == 0 ? 0 : cumulativeTime / numOpsExecuted;
+    }
+
+    double getOpsPerSecond() {
+      return elapsedTime == 0 ? 0 : 1000*(double)numOpsExecuted / elapsedTime;
+    }
+
+    String getBaseDir() {
+      return baseDir;
+    }
+
+    String getClientName(int idx) {
+      return getOpName() + "-client-" + idx;
+    }
+
+    void incrementStats(int ops, long time) {
+      numOpsExecuted += ops;
+      cumulativeTime += time;
+    }
+
+    /**
+     * Parse the first two arguments, corresponding to the "-op" option.
+     * 
+     * @param args
+     * @return true if the operation is "all", meaning that options not related
+     * to this operation should be ignored; false otherwise, meaning that
+     * usage should be printed when an unrelated option is encountered.
+     */
+    protected boolean verifyOpArgument(List<String> args) {
+      if(args.size() < 2 || ! args.get(0).startsWith("-op"))
+        printUsage();
+      int krIndex = args.indexOf("-keepResults");
+      keepResults = (krIndex >= 0);
+      if(keepResults) {
+        args.remove(krIndex);
+      }
+      String type = args.get(1);
+      if(OP_ALL_NAME.equals(type)) {
+        type = getOpName();
+        return true;
+      }
+      if(!getOpName().equals(type))
+        printUsage();
+      return false;
+    }
+
+    void printStats() {
+      LOG.info("--- " + getOpName() + " stats  ---");
+      LOG.info("# operations: " + getNumOpsExecuted());
+      LOG.info("Elapsed Time: " + getElapsedTime());
+      LOG.info(" Ops per sec: " + getOpsPerSecond());
+      LOG.info("Average Time: " + getAverageTime());
+    }
+  }
+
+  /**
+   * One of the threads that perform stats operations.
+   */
+  private class StatsDaemon extends Thread {
+    private int daemonId;
+    private int opsPerThread;
+    private String arg1;      // argument passed to executeOp()
+    private volatile int  localNumOpsExecuted = 0;
+    private volatile long localCumulativeTime = 0;
+    private OperationStatsBase statsOp;
+
+    StatsDaemon(int daemonId, int nrOps, OperationStatsBase op) {
+      this.daemonId = daemonId;
+      this.opsPerThread = nrOps;
+      this.statsOp = op;
+      setName(toString());
+    }
+
+    public void run() {
+      UserGroupInformation.setCurrentUser(ugi);
+      localNumOpsExecuted = 0;
+      localCumulativeTime = 0;
+      arg1 = statsOp.getExecutionArgument(daemonId);
+      try {
+        benchmarkOne();
+      } catch(IOException ex) {
+        LOG.error("StatsDaemon " + daemonId + " failed: \n" 
+            + StringUtils.stringifyException(ex));
+      }
+    }
+
+    public String toString() {
+      return "StatsDaemon-" + daemonId;
+    }
+
+    void benchmarkOne() throws IOException {
+      for(int idx = 0; idx < opsPerThread; idx++) {
+        long stat = statsOp.executeOp(daemonId, idx, arg1);
+        localNumOpsExecuted++;
+        localCumulativeTime += stat;
+      }
+    }
+
+    boolean isInProgress() {
+      return localNumOpsExecuted < opsPerThread;
+    }
+
+    /**
+     * Schedule to stop this daemon.
+     */
+    void terminate() {
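+      // benchmarkOne() loops while idx < opsPerThread, so shrinking
+      // opsPerThread to the number of operations already executed makes
+      // the daemon stop after the operation currently in progress.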
+      opsPerThread = localNumOpsExecuted;
+    }
+  }
+
+  /**
+   * Clean all benchmark result directories.
+   */
+  class CleanAllStats extends OperationStatsBase {
+    // Operation types
+    static final String OP_CLEAN_NAME = "clean";
+    static final String OP_CLEAN_USAGE = "-op clean";
+
+    CleanAllStats(List<String> args) {
+      super();
+      parseArguments(args);
+      numOpsRequired = 1;
+      numThreads = 1;
+      keepResults = true;
+    }
+
+    String getOpName() {
+      return OP_CLEAN_NAME;
+    }
+
+    void parseArguments(List<String> args) {
+      boolean ignoreUnrelatedOptions = verifyOpArgument(args);
+      if(args.size() > 2 && !ignoreUnrelatedOptions)
+        printUsage();
+    }
+
+    void generateInputs(int[] opsPerThread) throws IOException {
+      // do nothing
+    }
+
+    /**
+     * Does not require the argument
+     */
+    String getExecutionArgument(int daemonId) {
+      return null;
+    }
+
+    /**
+     * Remove entire benchmark directory.
+     */
+    long executeOp(int daemonId, int inputIdx, String ignore) 
+    throws IOException {
+      nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
+      long start = System.currentTimeMillis();
+      nameNode.delete(BASE_DIR_NAME, true);
+      long end = System.currentTimeMillis();
+      return end-start;
+    }
+
+    void printResults() {
+      LOG.info("--- " + getOpName() + " inputs ---");
+      LOG.info("Remove directory " + BASE_DIR_NAME);
+      printStats();
+    }
+  }
+
+  /**
+   * File creation statistics.
+   * 
+   * Each thread creates the same (+ or -1) number of files.
+   * File names are pre-generated during initialization.
+   * The created files do not have blocks.
+   */
+  class CreateFileStats extends OperationStatsBase {
+    // Operation types
+    static final String OP_CREATE_NAME = "create";
+    static final String OP_CREATE_USAGE = 
+      "-op create [-threads T] [-files N] [-filesPerDir P] [-close]";
+
+    protected FileNameGenerator nameGenerator;
+    protected String[][] fileNames;
+    private boolean closeUponCreate;
+
+    CreateFileStats(List<String> args) {
+      super();
+      parseArguments(args);
+    }
+
+    String getOpName() {
+      return OP_CREATE_NAME;
+    }
+
+    void parseArguments(List<String> args) {
+      boolean ignoreUnrelatedOptions = verifyOpArgument(args);
+      int nrFilesPerDir = 4;
+      closeUponCreate = false;
+      for (int i = 2; i < args.size(); i++) {       // parse command line
+        if(args.get(i).equals("-files")) {
+          if(i+1 == args.size())  printUsage();
+          numOpsRequired = Integer.parseInt(args.get(++i));
+        } else if(args.get(i).equals("-threads")) {
+          if(i+1 == args.size())  printUsage();
+          numThreads = Integer.parseInt(args.get(++i));
+        } else if(args.get(i).equals("-filesPerDir")) {
+          if(i+1 == args.size())  printUsage();
+          nrFilesPerDir = Integer.parseInt(args.get(++i));
+        } else if(args.get(i).equals("-close")) {
+          closeUponCreate = true;
+        } else if(!ignoreUnrelatedOptions)
+          printUsage();
+      }
+      nameGenerator = new FileNameGenerator(getBaseDir(), nrFilesPerDir);
+    }
+
+    void generateInputs(int[] opsPerThread) throws IOException {
+      assert opsPerThread.length == numThreads : "Error opsPerThread.length"; 
+      nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
+      // int generatedFileIdx = 0;
+      LOG.info("Generate " + numOpsRequired + " intputs for " + getOpName());
+      fileNames = new String[numThreads][];
+      for(int idx=0; idx < numThreads; idx++) {
+        int threadOps = opsPerThread[idx];
+        fileNames[idx] = new String[threadOps];
+        for(int jdx=0; jdx < threadOps; jdx++)
+          fileNames[idx][jdx] = nameGenerator.
+                                  getNextFileName("ThroughputBench");
+      }
+    }
+
+    void dummyActionNoSynch(int daemonId, int fileIdx) {
+      for(int i=0; i < 2000; i++)
+        fileNames[daemonId][fileIdx].contains(""+i);
+    }
+
+    /**
+     * returns client name
+     */
+    String getExecutionArgument(int daemonId) {
+      return getClientName(daemonId);
+    }
+
+    /**
+     * Do file create.
+     */
+    long executeOp(int daemonId, int inputIdx, String clientName) 
+    throws IOException {
+      long start = System.currentTimeMillis();
+      // dummyActionNoSynch(fileIdx);
+      nameNode.create(fileNames[daemonId][inputIdx], FsPermission.getDefault(),
+                      clientName, true, replication, BLOCK_SIZE);
+      long end = System.currentTimeMillis();
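+      // if -close was specified, keep calling complete() until the name-node
+      // acknowledges that the file is closed; this retry loop is not included
+      // in the measured create time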
+      for(boolean written = !closeUponCreate; !written; 
+        written = nameNode.complete(fileNames[daemonId][inputIdx], clientName));
+      return end-start;
+    }
+
+    void printResults() {
+      LOG.info("--- " + getOpName() + " inputs ---");
+      LOG.info("nrFiles = " + numOpsRequired);
+      LOG.info("nrThreads = " + numThreads);
+      LOG.info("nrFilesPerDir = " + nameGenerator.getFilesPerDirectory());
+      printStats();
+    }
+  }
+
+  /**
+   * Open file statistics.
+   * 
+   * Measure how many open calls (getBlockLocations()) 
+   * the name-node can handle per second.
+   */
+  class OpenFileStats extends CreateFileStats {
+    // Operation types
+    static final String OP_OPEN_NAME = "open";
+    static final String OP_USAGE_ARGS = 
+      " [-threads T] [-files N] [-filesPerDir P] [-useExisting]";
+    static final String OP_OPEN_USAGE = 
+      "-op " + OP_OPEN_NAME + OP_USAGE_ARGS;
+
+    private boolean useExisting;  // do not generate files, use existing ones
+
+    OpenFileStats(List<String> args) {
+      super(args);
+    }
+
+    String getOpName() {
+      return OP_OPEN_NAME;
+    }
+
+    void parseArguments(List<String> args) {
+      int ueIndex = args.indexOf("-useExisting");
+      useExisting = (ueIndex >= 0);
+      if(useExisting) {
+        args.remove(ueIndex);
+      }
+      super.parseArguments(args);
+    }
+
+    void generateInputs(int[] opsPerThread) throws IOException {
+      // create files using opsPerThread
+      String[] createArgs = new String[] {
+              "-op", "create", 
+              "-threads", String.valueOf(this.numThreads), 
+              "-files", String.valueOf(numOpsRequired),
+              "-filesPerDir", 
+              String.valueOf(nameGenerator.getFilesPerDirectory()),
+              "-close"};
+      CreateFileStats opCreate =  new CreateFileStats(Arrays.asList(createArgs));
+
+      if(!useExisting) {  // create files if they were not created before
+        opCreate.benchmark();
+        LOG.info("Created " + numOpsRequired + " files.");
+      } else {
+        LOG.info("useExisting = true. Assuming " 
+            + numOpsRequired + " files have been created before.");
+      }
+      // use the same files for open
+      super.generateInputs(opsPerThread);
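+      // if the files were created under the create operation's base directory,
+      // rename it to this operation's base directory so that the generated
+      // file names resolve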
+      if(nameNode.getFileInfo(opCreate.getBaseDir()) != null
+          && nameNode.getFileInfo(getBaseDir()) == null) {
+        nameNode.rename(opCreate.getBaseDir(), getBaseDir());
+      }
+      if(nameNode.getFileInfo(getBaseDir()) == null) {
+        throw new IOException(getBaseDir() + " does not exist.");
+      }
+    }
+
+    /**
+     * Do file open.
+     */
+    long executeOp(int daemonId, int inputIdx, String ignore) 
+    throws IOException {
+      long start = System.currentTimeMillis();
+      nameNode.getBlockLocations(fileNames[daemonId][inputIdx], 0L, BLOCK_SIZE);
+      long end = System.currentTimeMillis();
+      return end-start;
+    }
+  }
+
+  /**
+   * Delete file statistics.
+   * 
+   * Measure how many delete calls the name-node can handle per second.
+   */
+  class DeleteFileStats extends OpenFileStats {
+    // Operation types
+    static final String OP_DELETE_NAME = "delete";
+    static final String OP_DELETE_USAGE = 
+      "-op " + OP_DELETE_NAME + OP_USAGE_ARGS;
+
+    DeleteFileStats(List<String> args) {
+      super(args);
+    }
+
+    String getOpName() {
+      return OP_DELETE_NAME;
+    }
+
+    long executeOp(int daemonId, int inputIdx, String ignore) 
+    throws IOException {
+      long start = System.currentTimeMillis();
+      nameNode.delete(fileNames[daemonId][inputIdx], false);
+      long end = System.currentTimeMillis();
+      return end-start;
+    }
+  }
+
+  /**
+   * Rename file statistics.
+   * 
+   * Measure how many rename calls the name-node can handle per second.
+   */
+  class RenameFileStats extends OpenFileStats {
+    // Operation types
+    static final String OP_RENAME_NAME = "rename";
+    static final String OP_RENAME_USAGE = 
+      "-op " + OP_RENAME_NAME + OP_USAGE_ARGS;
+
+    protected String[][] destNames;
+
+    RenameFileStats(List<String> args) {
+      super(args);
+    }
+
+    String getOpName() {
+      return OP_RENAME_NAME;
+    }
+
+    void generateInputs(int[] opsPerThread) throws IOException {
+      super.generateInputs(opsPerThread);
+      destNames = new String[fileNames.length][];
+      for(int idx=0; idx < numThreads; idx++) {
+        int nrNames = fileNames[idx].length;
+        destNames[idx] = new String[nrNames];
+        for(int jdx=0; jdx < nrNames; jdx++)
+          destNames[idx][jdx] = fileNames[idx][jdx] + ".r";
+      }
+    }
+
+    long executeOp(int daemonId, int inputIdx, String ignore) 
+    throws IOException {
+      long start = System.currentTimeMillis();
+      nameNode.rename(fileNames[daemonId][inputIdx],
+                      destNames[daemonId][inputIdx]);
+      long end = System.currentTimeMillis();
+      return end-start;
+    }
+  }
+
+  /**
+   * Minimal data-node simulator.
+   */
+  private static class TinyDatanode implements Comparable<String> {
+    private static final long DF_CAPACITY = 100*1024*1024;
+    private static final long DF_USED = 0;
+    
+    NamespaceInfo nsInfo;
+    DatanodeRegistration dnRegistration;
+    Block[] blocks;
+    int nrBlocks; // actual number of blocks
+
+    /**
+     * Get the data-node name in the form 
+     * <host name> : <port>
+     * where port is a 6 digit integer.
+     * This is necessary in order to provide lexicographic ordering.
+     * Host names are all the same, so the ordering is determined by the port numbers.
+     */
+    private static String getNodeName(int port) throws IOException {
+      String machineName = DNS.getDefaultHost("default", "default");
+      String sPort = String.valueOf(100000 + port);
+      if(sPort.length() > 6)
+        throw new IOException("Too many data-nodes.");
+      return machineName + ":" + sPort;
+    }
+
+    TinyDatanode(int dnIdx, int blockCapacity) throws IOException {
+      dnRegistration = new DatanodeRegistration(getNodeName(dnIdx));
+      this.blocks = new Block[blockCapacity];
+      this.nrBlocks = 0;
+    }
+
+    String getName() {
+      return dnRegistration.getName();
+    }
+
+    void register() throws IOException {
+      // get versions from the namenode
+      nsInfo = nameNode.versionRequest();
+      dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
+      DataNode.setNewStorageID(dnRegistration);
+      // register datanode
+      dnRegistration = nameNode.register(dnRegistration);
+    }
+
+    /**
+     * Send a heartbeat to the name-node.
+     * Ignore reply commands.
+     */
+    void sendHeartbeat() throws IOException {
+      // send heartbeat to the name-node
+      DatanodeCommand[] cmds = nameNode.sendHeartbeat(
+          dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
+      if(cmds != null) {
+        for (DatanodeCommand cmd : cmds ) {
+          LOG.debug("sendHeartbeat Name-node reply: " + cmd.getAction());
+        }
+      }
+    }
+
+    boolean addBlock(Block blk) {
+      if(nrBlocks == blocks.length) {
+        LOG.debug("Cannot add block: datanode capacity = " + blocks.length);
+        return false;
+      }
+      blocks[nrBlocks] = blk;
+      nrBlocks++;
+      return true;
+    }
+
+    void formBlockReport() {
+      // fill remaining slots with blocks that do not exist
+      for(int idx = blocks.length-1; idx >= nrBlocks; idx--)
+        blocks[idx] = new Block(blocks.length - idx, 0, 0);
+    }
+
+    public int compareTo(String name) {
+      return getName().compareTo(name);
+    }
+
+    /**
+     * Send a heartbeat to the name-node and replicate blocks if requested.
+     */
+    int replicateBlocks() throws IOException {
+      // send heartbeat and check for replication commands from the name-node
+      DatanodeCommand[] cmds = nameNode.sendHeartbeat(
+          dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, 0, 0);
+      if (cmds != null) {
+        for (DatanodeCommand cmd : cmds) {
+          if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
+            // Send a copy of a block to another datanode
+            BlockCommand bcmd = (BlockCommand)cmd;
+            return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
+          }
+        }
+      }
+      return 0;
+    }
+
+    /**
+     * Transfer blocks to another data-node.
+     * Just report on behalf of the other data-node
+     * that the blocks have been received.
+     */
+    private int transferBlocks( Block blocks[], 
+                                DatanodeInfo xferTargets[][] 
+                              ) throws IOException {
+      for(int i = 0; i < blocks.length; i++) {
+        DatanodeInfo blockTargets[] = xferTargets[i];
+        for(int t = 0; t < blockTargets.length; t++) {
+          DatanodeInfo dnInfo = blockTargets[t];
+          DatanodeRegistration receivedDNReg;
+          receivedDNReg = new DatanodeRegistration(dnInfo.getName());
+          receivedDNReg.setStorageInfo(
+                          new DataStorage(nsInfo, dnInfo.getStorageID()));
+          receivedDNReg.setInfoPort(dnInfo.getInfoPort());
+          nameNode.blockReceived( receivedDNReg, 
+                                  new Block[] {blocks[i]},
+                                  new String[] {DataNode.EMPTY_DEL_HINT});
+        }
+      }
+      return blocks.length;
+    }
+  }
+
+  /**
+   * Block report statistics.
+   * 
+   * Each thread here represents its own data-node.
+   * Data-nodes send the same block report each time.
+   * The block report may contain missing or non-existing blocks.
+   */
+  class BlockReportStats extends OperationStatsBase {
+    static final String OP_BLOCK_REPORT_NAME = "blockReport";
+    static final String OP_BLOCK_REPORT_USAGE = 
+      "-op blockReport [-datanodes T] [-reports N] " +
+      "[-blocksPerReport B] [-blocksPerFile F]";
+
+    private int blocksPerReport;
+    private int blocksPerFile;
+    private TinyDatanode[] datanodes; // array of data-nodes sorted by name
+
+    BlockReportStats(List<String> args) {
+      super();
+      this.blocksPerReport = 100;
+      this.blocksPerFile = 10;
+      // set heartbeat interval to 3 minutes, so that the data-node expiration interval becomes 40 minutes
+      config.setLong("dfs.heartbeat.interval", 3 * 60);
+      parseArguments(args);
+      // adjust replication to the number of data-nodes
+      this.replication = (short)Math.min((int)replication, getNumDatanodes());
+    }
+
+    /**
+     * Each thread here pretends to be a data-node.
+     */
+    private int getNumDatanodes() {
+      return numThreads;
+    }
+
+    String getOpName() {
+      return OP_BLOCK_REPORT_NAME;
+    }
+
+    void parseArguments(List<String> args) {
+      boolean ignoreUnrelatedOptions = verifyOpArgument(args);
+      for (int i = 2; i < args.size(); i++) {       // parse command line
+        if(args.get(i).equals("-reports")) {
+          if(i+1 == args.size())  printUsage();
+          numOpsRequired = Integer.parseInt(args.get(++i));
+        } else if(args.get(i).equals("-datanodes")) {
+          if(i+1 == args.size())  printUsage();
+          numThreads = Integer.parseInt(args.get(++i));
+        } else if(args.get(i).equals("-blocksPerReport")) {
+          if(i+1 == args.size())  printUsage();
+          blocksPerReport = Integer.parseInt(args.get(++i));
+        } else if(args.get(i).equals("-blocksPerFile")) {
+          if(i+1 == args.size())  printUsage();
+          blocksPerFile = Integer.parseInt(args.get(++i));
+        } else if(!ignoreUnrelatedOptions)
+          printUsage();
+      }
+    }
+
+    void generateInputs(int[] ignore) throws IOException {
+      int nrDatanodes = getNumDatanodes();
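+      // create enough blocks so that, after replication, each data-node's
+      // report contains about blocksPerReport blocks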
+      int nrBlocks = (int)Math.ceil((double)blocksPerReport * nrDatanodes 
+                                    / replication);
+      int nrFiles = (int)Math.ceil((double)nrBlocks / blocksPerFile);
+      datanodes = new TinyDatanode[nrDatanodes];
+      // create data-nodes
+      String prevDNName = "";
+      for(int idx=0; idx < nrDatanodes; idx++) {
+        datanodes[idx] = new TinyDatanode(idx, blocksPerReport);
+        datanodes[idx].register();
+        assert datanodes[idx].getName().compareTo(prevDNName) > 0
+          : "Data-nodes must be sorted lexicographically.";
+        datanodes[idx].sendHeartbeat();
+        prevDNName = datanodes[idx].getName();
+      }
+
+      // create files 
+      LOG.info("Creating " + nrFiles + " with " + blocksPerFile + " blocks each.");
+      FileNameGenerator nameGenerator;
+      nameGenerator = new FileNameGenerator(getBaseDir(), 100);
+      String clientName = getClientName(007);
+      nameNode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
+      for(int idx=0; idx < nrFiles; idx++) {
+        String fileName = nameGenerator.getNextFileName("ThroughputBench");
+        nameNode.create(fileName, FsPermission.getDefault(),
+                        clientName, true, replication, BLOCK_SIZE);
+        addBlocks(fileName, clientName);
+        nameNode.complete(fileName, clientName);
+      }
+      // prepare block reports
+      for(int idx=0; idx < nrDatanodes; idx++) {
+        datanodes[idx].formBlockReport();
+      }
+    }
+
+    private void addBlocks(String fileName, String clientName) throws IOException {
+      for(int jdx = 0; jdx < blocksPerFile; jdx++) {
+        LocatedBlock loc = nameNode.addBlock(fileName, clientName);
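+        // record the block on every data-node the name-node selected as a
+        // replica target, and acknowledge receipt so the block is complete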
+        for(DatanodeInfo dnInfo : loc.getLocations()) {
+          int dnIdx = Arrays.binarySearch(datanodes, dnInfo.getName());
+          datanodes[dnIdx].addBlock(loc.getBlock());
+          nameNode.blockReceived(
+              datanodes[dnIdx].dnRegistration, 
+              new Block[] {loc.getBlock()},
+              new String[] {""});
+        }
+      }
+    }
+
+    /**
+     * Does not require the argument
+     */
+    String getExecutionArgument(int daemonId) {
+      return null;
+    }
+
+    long executeOp(int daemonId, int inputIdx, String ignore) throws IOException {
+      assert daemonId < numThreads : "Wrong daemonId.";
+      TinyDatanode dn = datanodes[daemonId];
+      long start = System.currentTimeMillis();
+      nameNode.blockReport(dn.dnRegistration,
+          BlockListAsLongs.convertToArrayLongs(dn.blocks));
+      long end = System.currentTimeMillis();
+      return end-start;
+    }
+
+    void printResults() {
+      String blockDistribution = "";
+      String delim = "(";
+      for(int idx=0; idx < getNumDatanodes(); idx++) {
+        blockDistribution += delim + datanodes[idx].nrBlocks;
+        delim = ", ";
+      }
+      blockDistribution += ")";
+      LOG.info("--- " + getOpName() + " inputs ---");
+      LOG.info("reports = " + numOpsRequired);
+      LOG.info("datanodes = " + numThreads + " " + blockDistribution);
+      LOG.info("blocksPerReport = " + blocksPerReport);
+      LOG.info("blocksPerFile = " + blocksPerFile);
+      printStats();
+    }
+  }   // end BlockReportStats
+
+  /**
+   * Measures how fast replication monitor can compute data-node work.
+   * 
+   * It runs only one thread until no more work can be scheduled.
+   */
+  class ReplicationStats extends OperationStatsBase {
+    static final String OP_REPLICATION_NAME = "replication";
+    static final String OP_REPLICATION_USAGE = 
+      "-op replication [-datanodes T] [-nodesToDecommission D] " +
+      "[-nodeReplicationLimit C] [-totalBlocks B] [-replication R]";
+
+    private BlockReportStats blockReportObject;
+    private int numDatanodes;
+    private int nodesToDecommission;
+    private int nodeReplicationLimit;
+    private int totalBlocks;
+    private int numDecommissionedBlocks;
+    private int numPendingBlocks;
+
+    ReplicationStats(List<String> args) {
+      super();
+      numThreads = 1;
+      numDatanodes = 3;
+      nodesToDecommission = 1;
+      nodeReplicationLimit = 100;
+      totalBlocks = 100;
+      parseArguments(args);
+      // number of operations is 4 times the number of decommissioned
+      // blocks divided by the number of needed replications scanned 
+      // by the replication monitor in one iteration
+      numOpsRequired = (totalBlocks*replication*nodesToDecommission*2)
+            / (numDatanodes*numDatanodes);
+
+      String[] blkReportArgs = {
+        "-op", "blockReport",
+        "-datanodes", String.valueOf(numDatanodes),
+        "-blocksPerReport", String.valueOf(totalBlocks*replication/numDatanodes),
+        "-blocksPerFile", String.valueOf(numDatanodes)};
+      blockReportObject = new BlockReportStats(Arrays.asList(blkReportArgs));
+      numDecommissionedBlocks = 0;
+      numPendingBlocks = 0;
+    }
+
+    String getOpName() {
+      return OP_REPLICATION_NAME;
+    }
+
+    void parseArguments(List<String> args) {
+      boolean ignoreUnrelatedOptions = verifyOpArgument(args);
+      for (int i = 2; i < args.size(); i++) {       // parse command line
+        if(args.get(i).equals("-datanodes")) {
+          if(i+1 == args.size())  printUsage();
+          numDatanodes = Integer.parseInt(args.get(++i));
+        } else if(args.get(i).equals("-nodesToDecommission")) {
+          if(i+1 == args.size())  printUsage();
+          nodesToDecommission = Integer.parseInt(args.get(++i));
+        } else if(args.get(i).equals("-nodeReplicationLimit")) {
+          if(i+1 == args.size())  printUsage();
+          nodeReplicationLimit = Integer.parseInt(args.get(++i));
+        } else if(args.get(i).equals("-totalBlocks")) {
+          if(i+1 == args.size())  printUsage();
+          totalBlocks = Integer.parseInt(args.get(++i));
+        } else if(args.get(i).equals("-replication")) {
+          if(i+1 == args.size())  printUsage();
+          replication = Short.parseShort(args.get(++i));
+        } else if(!ignoreUnrelatedOptions)
+          printUsage();
+      }
+    }
+
+    void generateInputs(int[] ignore) throws IOException {
+      final FSNamesystem namesystem = nameNode.getNamesystem();
+
+      // start data-nodes; create a bunch of files; generate block reports.
+      blockReportObject.generateInputs(ignore);
+      // stop replication monitor
+      namesystem.replthread.interrupt();
+      try {
+        namesystem.replthread.join();
+      } catch(InterruptedException ei) {
+        return;
+      }
+      // report blocks once
+      int nrDatanodes = blockReportObject.getNumDatanodes();
+      for(int idx=0; idx < nrDatanodes; idx++) {
+        blockReportObject.executeOp(idx, 0, null);
+      }
+      // decommission data-nodes
+      decommissionNodes();
+      // set node replication limit
+      namesystem.setNodeReplicationLimit(nodeReplicationLimit);
+    }
+
+    private void decommissionNodes() throws IOException {
+      String excludeFN = config.get("dfs.hosts.exclude", "exclude");
+      FileOutputStream excludeFile = new FileOutputStream(excludeFN);
+      excludeFile.getChannel().truncate(0L);
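+      // write the names of the last nodesToDecommission data-nodes into the
+      // exclude file; refreshNodes() below starts their decommissioning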
+      int nrDatanodes = blockReportObject.getNumDatanodes();
+      numDecommissionedBlocks = 0;
+      for(int i=0; i < nodesToDecommission; i++) {
+        TinyDatanode dn = blockReportObject.datanodes[nrDatanodes-1-i];
+        numDecommissionedBlocks += dn.nrBlocks;
+        excludeFile.write(dn.getName().getBytes());
+        excludeFile.write('\n');
+        LOG.info("Datanode " + dn.getName() + " is decommissioned.");
+      }
+      excludeFile.close();
+      nameNode.refreshNodes();
+    }
+
+    /**
+     * Does not require the argument
+     */
+    String getExecutionArgument(int daemonId) {
+      return null;
+    }
+
+    long executeOp(int daemonId, int inputIdx, String ignore) throws IOException {
+      assert daemonId < numThreads : "Wrong daemonId.";
+      long start = System.currentTimeMillis();
+      // compute data-node work
+      int work = nameNode.getNamesystem().computeDatanodeWork();
+      long end = System.currentTimeMillis();
+      numPendingBlocks += work;
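+      // no more replication work could be scheduled; stop this daemon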
+      if(work == 0)
+        daemons.get(daemonId).terminate();
+      return end-start;
+    }
+
+    void printResults() {
+      String blockDistribution = "";
+      String delim = "(";
+      int totalReplicas = 0;
+      for(int idx=0; idx < blockReportObject.getNumDatanodes(); idx++) {
+        totalReplicas += blockReportObject.datanodes[idx].nrBlocks;
+        blockDistribution += delim + blockReportObject.datanodes[idx].nrBlocks;
+        delim = ", ";
+      }
+      blockDistribution += ")";
+      LOG.info("--- " + getOpName() + " inputs ---");
+      LOG.info("numOpsRequired = " + numOpsRequired);
+      LOG.info("datanodes = " + numDatanodes + " " + blockDistribution);
+      LOG.info("decommissioned datanodes = " + nodesToDecommission);
+      LOG.info("datanode replication limit = " + nodeReplicationLimit);
+      LOG.info("total blocks = " + totalBlocks);
+      printStats();
+      LOG.info("decommissioned blocks = " + numDecommissionedBlocks);
+      LOG.info("pending replications = " + numPendingBlocks);
+      LOG.info("replications per sec: " + getBlocksPerSecond());
+    }
+
+    private double getBlocksPerSecond() {
+      return elapsedTime == 0 ? 0 : 1000*(double)numPendingBlocks / elapsedTime;
+    }
+
+  }   // end ReplicationStats
+
+  static void printUsage() {
+    System.err.println("Usage: NNThroughputBenchmark"
+        + "\n\t"    + OperationStatsBase.OP_ALL_USAGE
+        + " | \n\t" + CreateFileStats.OP_CREATE_USAGE
+        + " | \n\t" + OpenFileStats.OP_OPEN_USAGE
+        + " | \n\t" + DeleteFileStats.OP_DELETE_USAGE
+        + " | \n\t" + RenameFileStats.OP_RENAME_USAGE
+        + " | \n\t" + BlockReportStats.OP_BLOCK_REPORT_USAGE
+        + " | \n\t" + ReplicationStats.OP_REPLICATION_USAGE
+        + " | \n\t" + CleanAllStats.OP_CLEAN_USAGE
+    );
+    System.exit(-1);
+  }
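+
+  /*
+   * Example invocations built from the usage strings above (the option
+   * values are illustrative only, not taken from this patch):
+   *   NNThroughputBenchmark -op create -threads 10 -files 1000 -filesPerDir 4 -close
+   *   NNThroughputBenchmark -op open -threads 10 -files 1000 -useExisting
+   *   NNThroughputBenchmark -op all -keepResults
+   */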
+
+  /**
+   * Run the benchmark for the operation(s) specified on the command line.
+   * @param conf configuration
+   * @param args command line parameters
+   */
+  public static void runBenchmark(Configuration conf, List<String> args) throws Exception {
+    if(args.size() < 2 || ! args.get(0).startsWith("-op"))
+      printUsage();
+
+    String type = args.get(1);
+    boolean runAll = OperationStatsBase.OP_ALL_NAME.equals(type);
+
+    NNThroughputBenchmark bench = null;
+    List<OperationStatsBase> ops = new ArrayList<OperationStatsBase>();
+    OperationStatsBase opStat = null;
+    try {
+      bench = new NNThroughputBenchmark(conf);
+      if(runAll || CreateFileStats.OP_CREATE_NAME.equals(type)) {
+        opStat = bench.new CreateFileStats(args);
+        ops.add(opStat);
+      }
+      if(runAll || OpenFileStats.OP_OPEN_NAME.equals(type)) {
+        opStat = bench.new OpenFileStats(args);
+        ops.add(opStat);
+      }
+      if(runAll || DeleteFileStats.OP_DELETE_NAME.equals(type)) {
+        opStat = bench.new DeleteFileStats(args);
+        ops.add(opStat);
+      }
+      if(runAll || RenameFileStats.OP_RENAME_NAME.equals(type)) {
+        opStat = bench.new RenameFileStats(args);
+        ops.add(opStat);
+      }
+      if(runAll || BlockReportStats.OP_BLOCK_REPORT_NAME.equals(type)) {
+        opStat = bench.new BlockReportStats(args);
+        ops.add(opStat);
+      }
+      if(runAll || ReplicationStats.OP_REPLICATION_NAME.equals(type)) {
+        opStat = bench.new ReplicationStats(args);
+        ops.add(opStat);
+      }
+      if(runAll || CleanAllStats.OP_CLEAN_NAME.equals(type)) {
+        opStat = bench.new CleanAllStats(args);
+        ops.add(opStat);
+      }
+      if(ops.size() == 0)
+        printUsage();
+      // run each benchmark
+      for(OperationStatsBase op : ops) {
+        LOG.info("Starting benchmark: " + op.getOpName());
+        op.benchmark();
+        op.cleanUp();
+      }
+      // print statistics
+      for(OperationStatsBase op : ops) {
+        LOG.info("");
+        op.printResults();
+      }
+    } catch(Exception e) {
+      LOG.error(StringUtils.stringifyException(e));
+      throw e;
+    } finally {
+      if(bench != null)
+        bench.close();
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    runBenchmark(new Configuration(), 
+                  new ArrayList<String>(Arrays.asList(args)));
+  }
+}

+ 250 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java

@@ -0,0 +1,250 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSImage.CheckpointStates;
+
+import junit.framework.TestCase;
+
+public class TestBackupNode extends TestCase {
+  public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
+
+  static final String BASE_DIR = MiniDFSCluster.getBaseDirectory();
+
+  protected void setUp() throws Exception {
+    super.setUp();
+    File baseDir = new File(BASE_DIR);
+    if(baseDir.exists())
+      if(!(FileUtil.fullyDelete(baseDir)))
+        throw new IOException("Cannot remove directory: " + baseDir);
+    File dirC = new File(getBackupNodeDir(StartupOption.CHECKPOINT, 1));
+    dirC.mkdirs();
+    File dirB = new File(getBackupNodeDir(StartupOption.BACKUP, 1));
+    dirB.mkdirs();
+    dirB = new File(getBackupNodeDir(StartupOption.BACKUP, 2));
+    dirB.mkdirs();
+  }
+
+  protected void tearDown() throws Exception {
+    super.tearDown();
+    File baseDir = new File(BASE_DIR);
+    if(!(FileUtil.fullyDelete(baseDir)))
+      throw new IOException("Cannot remove directory: " + baseDir);
+  }
+
+  static void writeFile(FileSystem fileSys, Path name, int repl)
+  throws IOException {
+    TestCheckpoint.writeFile(fileSys, name, repl);
+  }
+
+
+  static void checkFile(FileSystem fileSys, Path name, int repl)
+  throws IOException {
+    TestCheckpoint.checkFile(fileSys, name, repl);
+  }
+
+  void cleanupFile(FileSystem fileSys, Path name)
+  throws IOException {
+    TestCheckpoint.cleanupFile(fileSys, name);
+  }
+
+  static String getBackupNodeDir(StartupOption t, int i) {
+    return BASE_DIR + "name" + t.getName() + i;
+  }
+
+  BackupNode startBackupNode(Configuration conf,
+                             StartupOption t, int i) throws IOException {
+    Configuration c = new Configuration(conf);
+    String dirs = getBackupNodeDir(t, i);
+    c.set("dfs.name.dir", dirs);
+    c.set("dfs.name.edits.dir", "${dfs.name.dir}");
+    return (BackupNode)NameNode.createNameNode(new String[]{t.getName()}, c);
+  }
+
+  void waitCheckpointDone(BackupNode backup) {
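+    // poll once a second until the backup node's checkpoint state is back to
+    // START, i.e. the checkpoint in progress has completed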
+    do {
+      try {
+        LOG.info("Waiting checkpoint to complete...");
+        Thread.sleep(1000);
+      } catch (Exception e) {}
+    } while(backup.getCheckpointState() != CheckpointStates.START);
+  }
+
+  public void testCheckpoint() throws IOException {
+    testCheckpoint(StartupOption.CHECKPOINT);
+    testCheckpoint(StartupOption.BACKUP);
+  }
+
+  void testCheckpoint(StartupOption op) throws IOException {
+    Path file1 = new Path("checkpoint.dat");
+    Path file2 = new Path("checkpoint2.dat");
+
+    Configuration conf = new Configuration();
+    short replication = (short)conf.getInt("dfs.replication", 3);
+    conf.set("dfs.blockreport.initialDelay", "0");
+    conf.setInt("dfs.datanode.scan.period.hours", -1); // disable block scanner
+    int numDatanodes = Math.max(3, replication);
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+    BackupNode backup = null;
+
+    try {
+      cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+      fileSys = cluster.getFileSystem();
+      //
+      // verify that 'format' really blew away all pre-existing files
+      //
+      assertTrue(!fileSys.exists(file1));
+      assertTrue(!fileSys.exists(file2));
+
+      //
+      // Create file1
+      //
+      writeFile(fileSys, file1, replication);
+      checkFile(fileSys, file1, replication);
+
+      //
+      // Take a checkpoint
+      //
+      backup = startBackupNode(conf, op, 1);
+      waitCheckpointDone(backup);
+    } catch(IOException e) {
+      LOG.error("Error in TestBackupNode:", e);
+      assertTrue(e.getLocalizedMessage(), false);
+    } finally {
+      if(backup != null) backup.stop();
+      if(fileSys != null) fileSys.close();
+      if(cluster != null) cluster.shutdown();
+    }
+    File imageFileNN = new File(BASE_DIR, "name1/current/fsimage");
+    File imageFileBN = new File(getBackupNodeDir(op, 1), "/current/fsimage");
+    LOG.info("NameNode fsimage length = " + imageFileNN.length());
+    LOG.info("Backup Node fsimage length = " + imageFileBN.length());
+    assertTrue(imageFileNN.length() == imageFileBN.length());
+
+    try {
+      //
+      // Restart cluster and verify that file1 still exist.
+      //
+      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      fileSys = cluster.getFileSystem();
+      // check that file1 still exists
+      checkFile(fileSys, file1, replication);
+      cleanupFile(fileSys, file1);
+
+      // create new file file2
+      writeFile(fileSys, file2, replication);
+      checkFile(fileSys, file2, replication);
+
+      //
+      // Take a checkpoint
+      //
+      backup = startBackupNode(conf, op, 1);
+      waitCheckpointDone(backup);
+    } catch(IOException e) {
+      LOG.error("Error in TestBackupNode:", e);
+      assertTrue(e.getLocalizedMessage(), false);
+    } finally {
+      if(backup != null) backup.stop();
+      if(fileSys != null) fileSys.close();
+      if(cluster != null) cluster.shutdown();
+    }
+    LOG.info("NameNode fsimage length = " + imageFileNN.length());
+    LOG.info("Backup Node fsimage length = " + imageFileBN.length());
+    assertTrue(imageFileNN.length() == imageFileBN.length());
+
+    try {
+      //
+      // Restart cluster and verify that file2 exists and
+      // file1 does not exist.
+      //
+      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      fileSys = cluster.getFileSystem();
+
+      assertTrue(!fileSys.exists(file1));
+
+      // verify that file2 exists
+      checkFile(fileSys, file2, replication);
+    } catch(IOException e) {
+      LOG.error("Error in TestBackupNode:", e);
+      assertTrue(e.getLocalizedMessage(), false);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test that only one backup node can register.
+   * @throws IOException
+   */
+  public void testBackupRegistration() throws IOException {
+    Configuration conf1 = new Configuration();
+    Configuration conf2 = null;
+    MiniDFSCluster cluster = null;
+    BackupNode backup1 = null;
+    BackupNode backup2 = null;
+    try {
+      // start name-node and backup node 1
+      cluster = new MiniDFSCluster(conf1, 0, true, null);
+      conf1.set("dfs.backup.address", "0.0.0.0:7770");
+      conf1.set("dfs.backup.http.address", "0.0.0.0:7775");
+      backup1 = startBackupNode(conf1, StartupOption.BACKUP, 1);
+      // try to start backup node 2
+      conf2 = new Configuration(conf1);
+      conf2.set("dfs.backup.address", "0.0.0.0:7771");
+      conf2.set("dfs.backup.http.address", "0.0.0.0:7776");
+      try {
+        backup2 = startBackupNode(conf2, StartupOption.BACKUP, 2);
+        backup2.stop();
+        backup2 = null;
+        assertTrue("Only one backup node should be able to start", false);
+      } catch(IOException e) {
+        assertTrue(
+            e.getLocalizedMessage().contains("Registration is not allowed"));
+        // should fail - doing good
+      }
+      // stop backup node 1; backup node 2 should be able to start
+      backup1.stop();
+      backup1 = null;
+      try {
+        backup2 = startBackupNode(conf2, StartupOption.BACKUP, 2);
+      } catch(IOException e) {
+        assertTrue("Backup node 2 should be able to start", false);
+      }
+    } catch(IOException e) {
+      LOG.error("Error in TestBackupNode:", e);
+      assertTrue(e.getLocalizedMessage(), false);
+    } finally {
+      if(backup1 != null) backup1.stop();
+      if(backup2 != null) backup2.stop();
+      if(cluster != null) cluster.shutdown();
+    }
+  }
+}

+ 714 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java

@@ -0,0 +1,714 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Collection;
+import java.util.List;
+import java.util.Iterator;
+import java.util.Random;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * This class tests the creation and validation of a checkpoint.
+ */
+public class TestCheckpoint extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 4096;
+  static final int fileSize = 8192;
+  static final int numDatanodes = 3;
+  short replication = 3;
+
+  static void writeFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)blockSize);
+    byte[] buffer = new byte[TestCheckpoint.fileSize];
+    Random rand = new Random(TestCheckpoint.seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+  
+  
+  static void checkFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    assertTrue(fileSys.exists(name));
+    int replication = fileSys.getFileStatus(name).getReplication();
+    assertEquals("replication for " + name, repl, replication);
+    //We should probably test for more of the file properties.    
+  }
+  
+  static void cleanupFile(FileSystem fileSys, Path name)
+    throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+
+  /**
+   * put back the old namedir
+   */
+  private void resurrectNameDir(File namedir) 
+    throws IOException {
+    String parentdir = namedir.getParent();
+    String name = namedir.getName();
+    File oldname =  new File(parentdir, name + ".old");
+    if (!oldname.renameTo(namedir)) {
+      assertTrue(false);
+    }
+  }
+
+  /**
+   * remove one namedir
+   */
+  private void removeOneNameDir(File namedir) 
+    throws IOException {
+    String parentdir = namedir.getParent();
+    String name = namedir.getName();
+    File newname =  new File(parentdir, name + ".old");
+    if (!namedir.renameTo(newname)) {
+      assertTrue(false);
+    }
+  }
+
+  /*
+   * Verify that namenode does not startup if one namedir is bad.
+   */
+  private void testNamedirError(Configuration conf, Collection<File> namedirs) 
+    throws IOException {
+    System.out.println("Starting testNamedirError");
+    MiniDFSCluster cluster = null;
+
+    if (namedirs.size() <= 1) {
+      return;
+    }
+    
+    //
+    // Remove one namedir & Restart cluster. This should fail.
+    //
+    File first = namedirs.iterator().next();
+    removeOneNameDir(first);
+    try {
+      cluster = new MiniDFSCluster(conf, 0, false, null);
+      cluster.shutdown();
+      assertTrue(false);
+    } catch (Throwable t) {
+      // do nothing
+    }
+    resurrectNameDir(first); // put back namedir
+  }
+
+  /*
+   * Simulate namenode crashing after rolling edit log.
+   */
+  private void testSecondaryNamenodeError1(Configuration conf)
+    throws IOException {
+    System.out.println("Starting testSecondaryNamenodeError 1");
+    Path file1 = new Path("checkpointxx.dat");
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
+                                                false, null);
+    cluster.waitActive();
+    FileSystem fileSys = cluster.getFileSystem();
+    try {
+      assertTrue(!fileSys.exists(file1));
+      //
+      // Make the checkpoint fail after rolling the edits log.
+      //
+      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      ErrorSimulator.setErrorSimulation(0);
+
+      try {
+        secondary.doCheckpoint();  // this should fail
+        assertTrue(false);
+      } catch (IOException e) {
+      }
+      ErrorSimulator.clearErrorSimulation(0);
+      secondary.shutdown();
+
+      //
+      // Create a new file
+      //
+      writeFile(fileSys, file1, replication);
+      checkFile(fileSys, file1, replication);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+
+    //
+    // Restart cluster and verify that file exists.
+    // Then take another checkpoint to verify that the 
+    // namenode restart accounted for the rolled edit logs.
+    //
+    System.out.println("Starting testSecondaryNamenodeError 2");
+    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    cluster.waitActive();
+    // Also check that the edits file is empty here
+    // and that temporary checkpoint files are gone.
+    FSImage image = cluster.getNameNode().getFSImage();
+    for (Iterator<StorageDirectory> it = 
+             image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
+      StorageDirectory sd = it.next();
+      assertFalse(FSImage.getImageFile(sd, NameNodeFile.IMAGE_NEW).exists());
+    }
+    for (Iterator<StorageDirectory> it = 
+            image.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
+      StorageDirectory sd = it.next();
+      assertFalse(image.getEditNewFile(sd).exists());
+      File edits = image.getEditFile(sd);
+      assertTrue(edits.exists()); // edits should exist and be empty
+      long editsLen = edits.length();
+      assertTrue(editsLen == Integer.SIZE/Byte.SIZE);
+    }
+    
+    fileSys = cluster.getFileSystem();
+    try {
+      checkFile(fileSys, file1, replication);
+      cleanupFile(fileSys, file1);
+      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary.doCheckpoint();
+      secondary.shutdown();
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
+  /*
+   * Simulate a namenode crash after uploading new image
+   */
+  private void testSecondaryNamenodeError2(Configuration conf)
+    throws IOException {
+    System.out.println("Starting testSecondaryNamenodeError 21");
+    Path file1 = new Path("checkpointyy.dat");
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
+                                                false, null);
+    cluster.waitActive();
+    FileSystem fileSys = cluster.getFileSystem();
+    try {
+      assertTrue(!fileSys.exists(file1));
+      //
+      // Make the checkpoint fail after uploading the new fsimage.
+      //
+      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      ErrorSimulator.setErrorSimulation(1);
+
+      try {
+        secondary.doCheckpoint();  // this should fail
+        assertTrue(false);
+      } catch (IOException e) {
+      }
+      ErrorSimulator.clearErrorSimulation(1);
+      secondary.shutdown();
+
+      //
+      // Create a new file
+      //
+      writeFile(fileSys, file1, replication);
+      checkFile(fileSys, file1, replication);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+
+    //
+    // Restart cluster and verify that file exists.
+    // Then take another checkpoint to verify that the 
+    // namenode restart accounted for the rolled edit logs.
+    //
+    System.out.println("Starting testSecondaryNamenodeError 22");
+    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    cluster.waitActive();
+    fileSys = cluster.getFileSystem();
+    try {
+      checkFile(fileSys, file1, replication);
+      cleanupFile(fileSys, file1);
+      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary.doCheckpoint();
+      secondary.shutdown();
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
+  /*
+   * Simulate a secondary namenode crash after rolling the edit log.
+   */
+  private void testSecondaryNamenodeError3(Configuration conf)
+    throws IOException {
+    System.out.println("Starting testSecondaryNamenodeError 31");
+    Path file1 = new Path("checkpointzz.dat");
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
+                                                false, null);
+    cluster.waitActive();
+    FileSystem fileSys = cluster.getFileSystem();
+    try {
+      assertTrue(!fileSys.exists(file1));
+      //
+      // Make the checkpoint fail after rolling the edit log.
+      //
+      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      ErrorSimulator.setErrorSimulation(0);
+
+      try {
+        secondary.doCheckpoint();  // this should fail
+        assertTrue(false);
+      } catch (IOException e) {
+      }
+      ErrorSimulator.clearErrorSimulation(0);
+      secondary.shutdown(); // secondary namenode crash!
+
+      // start new instance of secondary and verify that 
+      // a new rollEditLog succeeds in spite of the fact that
+      // edits.new already exists.
+      //
+      secondary = startSecondaryNameNode(conf);
+      secondary.doCheckpoint();  // this should work correctly
+      secondary.shutdown();
+
+      //
+      // Create a new file
+      //
+      writeFile(fileSys, file1, replication);
+      checkFile(fileSys, file1, replication);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+
+    //
+    // Restart cluster and verify that file exists.
+    // Then take another checkpoint to verify that the 
+    // namenode restart accounted for the twice-rolled edit logs.
+    //
+    System.out.println("Starting testSecondaryNamenodeError 32");
+    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    cluster.waitActive();
+    fileSys = cluster.getFileSystem();
+    try {
+      checkFile(fileSys, file1, replication);
+      cleanupFile(fileSys, file1);
+      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary.doCheckpoint();
+      secondary.shutdown();
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Simulate a secondary node failure to transfer image
+   * back to the name-node.
+   * Used to truncate primary fsimage file.
+   */
+  void testSecondaryFailsToReturnImage(Configuration conf)
+    throws IOException {
+    System.out.println("Starting testSecondaryFailsToReturnImage");
+    Path file1 = new Path("checkpointRI.dat");
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
+                                                false, null);
+    cluster.waitActive();
+    FileSystem fileSys = cluster.getFileSystem();
+    FSImage image = cluster.getNameNode().getFSImage();
+    try {
+      assertTrue(!fileSys.exists(file1));
+      StorageDirectory sd = null;
+      for (Iterator<StorageDirectory> it = 
+                image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();)
+         sd = it.next();
+      assertTrue(sd != null);
+      long fsimageLength = FSImage.getImageFile(sd, NameNodeFile.IMAGE).length();
+      //
+      // Make the checkpoint
+      //
+      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      ErrorSimulator.setErrorSimulation(2);
+
+      try {
+        secondary.doCheckpoint();  // this should fail
+        assertTrue(false);
+      } catch (IOException e) {
+        System.out.println("testSecondaryFailsToReturnImage: doCheckpoint() " +
+            "failed predictably - " + e);
+      }
+      ErrorSimulator.clearErrorSimulation(2);
+
+      // Verify that image file sizes did not change.
+      for (Iterator<StorageDirectory> it = 
+              image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
+        assertTrue(FSImage.getImageFile(it.next(), 
+                                NameNodeFile.IMAGE).length() == fsimageLength);
+      }
+
+      secondary.shutdown();
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+  }
+
+  /**
+   * Test different startup scenarios.
+   * <p><ol>
+   * <li> Start of primary name-node in secondary directory must succeed. 
+   * <li> Start of secondary node when the primary is already running in 
+   *      this directory must fail.
+   * <li> Start of primary name-node if secondary node is already running in 
+   *      this directory must fail.
+   * <li> Start of two secondary nodes in the same directory must fail.
+   * <li> Import of a checkpoint must fail if primary 
+   * directory contains a valid image.
+   * <li> Import of the secondary image directory must succeed if primary 
+   * directory does not exist.
+   * <li> Recover failed checkpoint for secondary node.
+   * <li> Complete failed checkpoint for secondary node.
+   * </ol>
+   */
+  void testStartup(Configuration conf) throws IOException {
+    System.out.println("Startup of the name-node in the checkpoint directory.");
+    String primaryDirs = conf.get("dfs.name.dir");
+    String primaryEditsDirs = conf.get("dfs.name.edits.dir");
+    String checkpointDirs = conf.get("fs.checkpoint.dir");
+    String checkpointEditsDirs = conf.get("fs.checkpoint.edits.dir");
+    NameNode nn = startNameNode(conf, checkpointDirs, checkpointEditsDirs,
+                                 StartupOption.REGULAR);
+
+    // Starting secondary node in the same directory as the primary
+    System.out.println("Startup of secondary in the same dir as the primary.");
+    SecondaryNameNode secondary = null;
+    try {
+      secondary = startSecondaryNameNode(conf);
+      assertFalse(secondary.getFSImage().isLockSupported(0));
+      secondary.shutdown();
+    } catch (IOException e) { // expected to fail
+      assertTrue(secondary == null);
+    }
+    nn.stop(); nn = null;
+
+    // Starting primary node in the same directory as the secondary
+    System.out.println("Startup of primary in the same dir as the secondary.");
+    // secondary won't start without primary
+    nn = startNameNode(conf, primaryDirs, primaryEditsDirs,
+                        StartupOption.REGULAR);
+    boolean succeed = false;
+    do {
+      try {
+        secondary = startSecondaryNameNode(conf);
+        succeed = true;
+      } catch(IOException ie) { // keep trying
+        System.out.println("Try again: " + ie.getLocalizedMessage());
+      }
+    } while(!succeed);
+    nn.stop(); nn = null;
+    try {
+      nn = startNameNode(conf, checkpointDirs, checkpointEditsDirs,
+                          StartupOption.REGULAR);
+      assertFalse(nn.getFSImage().isLockSupported(0));
+      nn.stop(); nn = null;
+    } catch (IOException e) { // expected to fail
+      assertTrue(nn == null);
+    }
+
+    // Try another secondary in the same directory
+    System.out.println("Startup of two secondaries in the same dir.");
+    // secondary won't start without primary
+    nn = startNameNode(conf, primaryDirs, primaryEditsDirs,
+                        StartupOption.REGULAR);
+    SecondaryNameNode secondary2 = null;
+    try {
+      secondary2 = startSecondaryNameNode(conf);
+      assertFalse(secondary2.getFSImage().isLockSupported(0));
+      secondary2.shutdown();
+    } catch (IOException e) { // expected to fail
+      assertTrue(secondary2 == null);
+    }
+    nn.stop(); nn = null;
+    secondary.shutdown();
+
+    // Import a checkpoint with existing primary image.
+    System.out.println("Import a checkpoint with existing primary image.");
+    try {
+      nn = startNameNode(conf, primaryDirs, primaryEditsDirs,
+                          StartupOption.IMPORT);
+      assertTrue(false);
+    } catch (IOException e) { // expected to fail
+      assertTrue(nn == null);
+    }
+    
+    // Remove current image and import a checkpoint.
+    System.out.println("Import a checkpoint with existing primary image.");
+    List<File> nameDirs = (List<File>)FSNamesystem.getNamespaceDirs(conf);
+    List<File> nameEditsDirs = (List<File>)FSNamesystem.
+                                  getNamespaceEditsDirs(conf);
+    long fsimageLength = new File(new File(nameDirs.get(0), "current"), 
+                                        NameNodeFile.IMAGE.getName()).length();
+    for(File dir : nameDirs) {
+      if(dir.exists())
+        if(!(FileUtil.fullyDelete(dir)))
+          throw new IOException("Cannot remove directory: " + dir);
+      if (!dir.mkdirs())
+        throw new IOException("Cannot create directory " + dir);
+    }
+
+    for(File dir : nameEditsDirs) {
+      if(dir.exists())
+        if(!(FileUtil.fullyDelete(dir)))
+          throw new IOException("Cannot remove directory: " + dir);
+      if (!dir.mkdirs())
+        throw new IOException("Cannot create directory " + dir);
+    }
+    
+    nn = startNameNode(conf, primaryDirs, primaryEditsDirs,
+                        StartupOption.IMPORT);
+    // Verify that image file sizes did not change.
+    FSImage image = nn.getFSImage();
+    for (Iterator<StorageDirectory> it = 
+            image.dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
+      assertTrue(FSImage.getImageFile(it.next(), 
+                          NameNodeFile.IMAGE).length() == fsimageLength);
+    }
+    nn.stop();
+
+    // recover failed checkpoint
+    nn = startNameNode(conf, primaryDirs, primaryEditsDirs,
+                        StartupOption.REGULAR);
+    Collection<File> secondaryDirs = FSImage.getCheckpointDirs(conf, null);
+    for(File dir : secondaryDirs) {
+      Storage.rename(new File(dir, "current"), 
+                     new File(dir, "lastcheckpoint.tmp"));
+    }
+    secondary = startSecondaryNameNode(conf);
+    secondary.shutdown();
+    for(File dir : secondaryDirs) {
+      assertTrue(new File(dir, "current").exists()); 
+      assertFalse(new File(dir, "lastcheckpoint.tmp").exists());
+    }
+    
+    // complete failed checkpoint
+    for(File dir : secondaryDirs) {
+      Storage.rename(new File(dir, "previous.checkpoint"), 
+                     new File(dir, "lastcheckpoint.tmp"));
+    }
+    secondary = startSecondaryNameNode(conf);
+    secondary.shutdown();
+    for(File dir : secondaryDirs) {
+      assertTrue(new File(dir, "current").exists()); 
+      assertTrue(new File(dir, "previous.checkpoint").exists()); 
+      assertFalse(new File(dir, "lastcheckpoint.tmp").exists());
+    }
+    nn.stop(); nn = null;
+    
+    // Check that everything starts ok now.
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    cluster.waitActive();
+    cluster.shutdown();
+  }
+
+  NameNode startNameNode( Configuration conf,
+                          String imageDirs,
+                          String editsDirs,
+                          StartupOption start) throws IOException {
+    conf.set("fs.default.name", "hdfs://localhost:0");
+    conf.set("dfs.http.address", "0.0.0.0:0");  
+    conf.set("dfs.name.dir", imageDirs);
+    conf.set("dfs.name.edits.dir", editsDirs);
+    String[] args = new String[]{start.getName()};
+    NameNode nn = NameNode.createNameNode(args, conf);
+    assertTrue(nn.isInSafeMode());
+    return nn;
+  }
+
+  SecondaryNameNode startSecondaryNameNode(Configuration conf
+                                          ) throws IOException {
+    conf.set("dfs.secondary.http.address", "0.0.0.0:0");
+    return new SecondaryNameNode(conf);
+  }
+
+  /**
+   * Tests checkpoint in HDFS.
+   */
+  public void testCheckpoint() throws IOException {
+    Path file1 = new Path("checkpoint.dat");
+    Path file2 = new Path("checkpoint2.dat");
+    Collection<File> namedirs = null;
+
+    Configuration conf = new Configuration();
+    conf.set("dfs.secondary.http.address", "0.0.0.0:0");
+    replication = (short)conf.getInt("dfs.replication", 3);  
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    cluster.waitActive();
+    FileSystem fileSys = cluster.getFileSystem();
+
+    try {
+      //
+      // verify that 'format' really blew away all pre-existing files
+      //
+      assertTrue(!fileSys.exists(file1));
+      assertTrue(!fileSys.exists(file2));
+      namedirs = cluster.getNameDirs();
+
+      //
+      // Create file1
+      //
+      writeFile(fileSys, file1, replication);
+      checkFile(fileSys, file1, replication);
+
+      //
+      // Take a checkpoint
+      //
+      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      ErrorSimulator.initializeErrorSimulationEvent(3);
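+      // Allocate 3 error-simulation slots; the checkpoint-error tests invoked at the end
+      // of this method are assumed to arm and clear them via setErrorSimulation() /
+      // clearErrorSimulation(), as testSecondaryFailsToReturnImage() does with slot 2.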
+      secondary.doCheckpoint();
+      secondary.shutdown();
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+
+    //
+    // Restart cluster and verify that file1 still exists.
+    //
+    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    cluster.waitActive();
+    fileSys = cluster.getFileSystem();
+    try {
+      // check that file1 still exists
+      checkFile(fileSys, file1, replication);
+      cleanupFile(fileSys, file1);
+
+      // create new file file2
+      writeFile(fileSys, file2, replication);
+      checkFile(fileSys, file2, replication);
+
+      //
+      // Take a checkpoint
+      //
+      SecondaryNameNode secondary = startSecondaryNameNode(conf);
+      secondary.doCheckpoint();
+      secondary.shutdown();
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+
+    //
+    // Restart cluster and verify that file2 exists and
+    // file1 does not exist.
+    //
+    cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+    cluster.waitActive();
+    fileSys = cluster.getFileSystem();
+
+    assertTrue(!fileSys.exists(file1));
+
+    try {
+      // verify that file2 exists
+      checkFile(fileSys, file2, replication);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+
+    // file2 is left behind.
+
+    testSecondaryNamenodeError1(conf);
+    testSecondaryNamenodeError2(conf);
+    testSecondaryNamenodeError3(conf);
+    testNamedirError(conf, namedirs);
+    testSecondaryFailsToReturnImage(conf);
+    testStartup(conf);
+  }
+
+  /**
+   * Tests saving the namespace.
+   */
+  public void testSaveNamespace() throws IOException {
+    MiniDFSCluster cluster = null;
+    DistributedFileSystem fs = null;
+    try {
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      cluster.waitActive();
+      fs = (DistributedFileSystem)(cluster.getFileSystem());
+
+      // Saving image without safe mode should fail
+      DFSAdmin admin = new DFSAdmin(conf);
+      String[] args = new String[]{"-saveNamespace"};
+      try {
+        admin.run(args);
+      } catch(IOException eIO) {
+        assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
+      } catch(Exception e) {
+        throw new IOException(e);
+      }
+      // create new file
+      Path file = new Path("namespace.dat");
+      writeFile(fs, file, replication);
+      checkFile(fs, file, replication);
+      // verify that the edits file is NOT empty
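+      // (note: a freshly created edits file is assumed to hold a 4-byte header already,
+      //  hence the comparisons against Integer.SIZE/Byte.SIZE here and below)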
+      Collection<File> editsDirs = cluster.getNameEditsDirs();
+      for(File ed : editsDirs) {
+        assertTrue(new File(ed, "current/edits").length() > Integer.SIZE/Byte.SIZE);
+      }
+
+      // Saving image in safe mode should succeed
+      fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+      try {
+        admin.run(args);
+      } catch(Exception e) {
+        throw new IOException(e);
+      }
+      // verify that the edits file is empty
+      for(File ed : editsDirs) {
+        assertTrue(new File(ed, "current/edits").length() == Integer.SIZE/Byte.SIZE);
+      }
+
+      // restart cluster and verify file exists
+      cluster.shutdown();
+      cluster = null;
+
+      cluster = new MiniDFSCluster(conf, numDatanodes, false, null);
+      cluster.waitActive();
+      fs = (DistributedFileSystem)(cluster.getFileSystem());
+      checkFile(fs, file, replication);
+    } finally {
+      if(fs != null) fs.close();
+      if(cluster!= null) cluster.shutdown();
+    }
+  }
+}

+ 57 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestComputeInvalidateWork.java

@@ -0,0 +1,57 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+
+import junit.framework.TestCase;
+
+/**
+ * Test if FSNamesystem correctly schedules block-invalidation work for datanodes.
+ */
+public class TestComputeInvalidateWork extends TestCase {
+  /**
+   * Test if {@link FSNamesystem#computeInvalidateWork(int)}
+   * can schedule invalidate work correctly 
+   */
+  public void testCompInvalidate() throws Exception {
+    final Configuration conf = new Configuration();
+    final int NUM_OF_DATANODES = 3;
+    final MiniDFSCluster cluster = new MiniDFSCluster(conf, NUM_OF_DATANODES, true, null);
+    try {
+      cluster.waitActive();
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      DatanodeDescriptor[] nodes =
+        namesystem.heartbeats.toArray(new DatanodeDescriptor[NUM_OF_DATANODES]);
+      assertEquals(nodes.length, NUM_OF_DATANODES);
+      
+      synchronized (namesystem) {
+      for (int i=0; i<nodes.length; i++) {
+        for(int j=0; j<3*namesystem.blockInvalidateLimit+1; j++) {
+          Block block = new Block(i*(namesystem.blockInvalidateLimit+1)+j, 0, 
+              GenerationStamp.FIRST_VALID_STAMP);
+          namesystem.blockManager.addToInvalidates(block, nodes[i]);
+        }
+      }
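+      // Each datanode now has 3*blockInvalidateLimit+1 blocks queued for deletion, so a
+      // single computeInvalidateWork() call can hand out at most blockInvalidateLimit
+      // blocks per selected datanode, which is what the assertions below verify.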
+      
+      assertEquals(namesystem.blockInvalidateLimit*NUM_OF_DATANODES, 
+          namesystem.blockManager.computeInvalidateWork(NUM_OF_DATANODES+1));
+      assertEquals(namesystem.blockInvalidateLimit*NUM_OF_DATANODES, 
+          namesystem.blockManager.computeInvalidateWork(NUM_OF_DATANODES));
+      assertEquals(namesystem.blockInvalidateLimit*(NUM_OF_DATANODES-1), 
+          namesystem.blockManager.computeInvalidateWork(NUM_OF_DATANODES-1));
+      int workCount = namesystem.blockManager.computeInvalidateWork(1);
+      if (workCount == 1) {
+        assertEquals(namesystem.blockInvalidateLimit+1, 
+            namesystem.blockManager.computeInvalidateWork(2));
+      } else {
+        assertEquals(workCount, namesystem.blockInvalidateLimit);
+        assertEquals(2, namesystem.blockManager.computeInvalidateWork(2));
+      }
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}

+ 51 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestDatanodeDescriptor.java

@@ -0,0 +1,51 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.ArrayList;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+
+import junit.framework.TestCase;
+
+/**
+ * This class tests methods in DatanodeDescriptor.
+ */
+public class TestDatanodeDescriptor extends TestCase {
+  /**
+   * Test that getInvalidateBlocks observes the max limit.
+   */
+  public void testGetInvalidateBlocks() throws Exception {
+    final int MAX_BLOCKS = 10;
+    final int REMAINING_BLOCKS = 2;
+    final int MAX_LIMIT = MAX_BLOCKS - REMAINING_BLOCKS;
+    
+    DatanodeDescriptor dd = new DatanodeDescriptor();
+    ArrayList<Block> blockList = new ArrayList<Block>(MAX_BLOCKS);
+    for (int i=0; i<MAX_BLOCKS; i++) {
+      blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
+    }
+    dd.addBlocksToBeInvalidated(blockList);
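+    // The first request is capped at MAX_LIMIT blocks; the remaining two blocks are
+    // returned by the second request.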
+    BlockCommand bc = dd.getInvalidateBlocks(MAX_LIMIT);
+    assertEquals(bc.getBlocks().length, MAX_LIMIT);
+    bc = dd.getInvalidateBlocks(MAX_LIMIT);
+    assertEquals(bc.getBlocks().length, REMAINING_BLOCKS);
+  }
+}

+ 152 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java

@@ -0,0 +1,152 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Iterator;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.permission.*;
+
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+
+/**
+ * This class tests the creation and validation of the name-node transaction log (edit log).
+ */
+public class TestEditLog extends TestCase {
+  static final int NUM_DATA_NODES = 1;
+
+  // This test creates NUM_THREADS threads and each thread does
+  // 2 * NUM_TRANSACTIONS Transactions concurrently.
+  static final int NUM_TRANSACTIONS = 100;
+  static final int NUM_THREADS = 100;
+
+  //
+  // an object that does a bunch of transactions
+  //
+  static class Transactions implements Runnable {
+    FSNamesystem namesystem;
+    int numTransactions;
+    short replication = 3;
+    long blockSize = 64;
+
+    Transactions(FSNamesystem ns, int num) {
+      namesystem = ns;
+      numTransactions = num;
+    }
+
+    // add a bunch of transactions.
+    public void run() {
+      PermissionStatus p = namesystem.createFsOwnerPermissions(
+                                          new FsPermission((short)0777));
+      FSEditLog editLog = namesystem.getEditLog();
+
+      for (int i = 0; i < numTransactions; i++) {
+        try {
+          INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
+                              p, replication, blockSize, 0, "", "", null);
+          editLog.logOpenFile("/filename" + i, inode);
+          editLog.logCloseFile("/filename" + i, inode);
+          editLog.logSync();
+        } catch (IOException e) {
+          System.out.println("Transaction " + i + " encountered exception " +
+                             e);
+        }
+      }
+    }
+  }
+
+  /**
+   * Tests transaction logging in dfs.
+   */
+  public void testEditLog() throws IOException {
+
+    // start a cluster 
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    FileSystem fileSys = null;
+
+    try {
+      cluster = new MiniDFSCluster(conf, NUM_DATA_NODES, true, null);
+      cluster.waitActive();
+      fileSys = cluster.getFileSystem();
+      final FSNamesystem namesystem = cluster.getNamesystem();
+  
+      for (Iterator<File> it = cluster.getNameDirs().iterator(); it.hasNext(); ) {
+        File dir = it.next();
+        System.out.println(dir);
+      }
+  
+      FSImage fsimage = namesystem.getFSImage();
+      FSEditLog editLog = fsimage.getEditLog();
+  
+      // set small size of flush buffer
+      editLog.setBufferCapacity(2048);
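+      // a 2KB buffer forces frequent flushes while the 100 transaction threads run,
+      // presumably to exercise the concurrent logSync() path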
+      editLog.close();
+      editLog.open();
+    
+      // Create threads and make them run transactions concurrently.
+      Thread threadId[] = new Thread[NUM_THREADS];
+      for (int i = 0; i < NUM_THREADS; i++) {
+        Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
+        threadId[i] = new Thread(trans, "TransactionThread-" + i);
+        threadId[i].start();
+      }
+  
+      // wait for all transactions to get over
+      for (int i = 0; i < NUM_THREADS; i++) {
+        try {
+          threadId[i].join();
+        } catch (InterruptedException e) {
+          i--;      // retry 
+        }
+      } 
+      
+      editLog.close();
+      editLog.open();
+  
+      // Verify that we can read in all the transactions that we have written.
+      // If there were any corruptions, it is likely that the reading in
+      // of these transactions will throw an exception.
+      //
+      for (Iterator<StorageDirectory> it = 
+              fsimage.dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
+        File editFile = FSImage.getImageFile(it.next(), NameNodeFile.EDITS);
+        System.out.println("Verifying file: " + editFile);
+        int numEdits = namesystem.getEditLog().loadFSEdits(
+                                  new EditLogFileInputStream(editFile));
+        int numLeases = namesystem.leaseManager.countLease();
+        System.out.println("Number of outstanding leases " + numLeases);
+        assertEquals(0, numLeases);
+        assertTrue("Verification for " + editFile + " failed. " +
+                   "Expected " + (NUM_THREADS * 2 * NUM_TRANSACTIONS) + " transactions. "+
+                   "Found " + numEdits + " transactions.",
+                   numEdits == NUM_THREADS * 2 * NUM_TRANSACTIONS);
+  
+      }
+    } finally {
+      if(fileSys != null) fileSys.close();
+      if(cluster != null) cluster.shutdown();
+    }
+  }
+}

+ 171 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java

@@ -0,0 +1,171 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+
+
+/**
+ * This class tests that a file system adheres to the limit of
+ * maximum number of files that is configured.
+ */
+public class TestFileLimit extends TestCase {
+  static final long seed = 0xDEADBEEFL;
+  static final int blockSize = 8192;
+  boolean simulatedStorage = false;
+
+  // creates a zero file.
+  private void createFile(FileSystem fileSys, Path name)
+    throws IOException {
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)1, (long)blockSize);
+    byte[] buffer = new byte[1024];
+    Random rand = new Random(seed);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+
+  private void waitForLimit(FSNamesystem namesys, long num)
+  {
+    // wait for the total object count (blocks + inodes) to reach the expected value
+    while (true) {
+      long total = namesys.getBlocksTotal() + namesys.dir.totalInodes();
+      System.out.println("Comparing current nodes " + total +
+                         " to become " + num);
+      if (total == num) {
+        break;
+      }
+      try {
+        Thread.sleep(1000);
+      } catch (InterruptedException e) {
+      }
+    }
+  }
+
+  /**
+   * Test that the name-node enforces the configured limit on the number of objects.
+   */
+  public void testFileLimit() throws IOException {
+    Configuration conf = new Configuration();
+    int maxObjects = 5;
+    conf.setLong("dfs.max.objects", maxObjects);
+    conf.setLong("dfs.blockreport.intervalMsec", 1000L);
+    conf.setInt("dfs.heartbeat.interval", 1);
+    int currentNodes = 0;
+    
+    if (simulatedStorage) {
+      conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
+    }
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    FileSystem fs = cluster.getFileSystem();
+    FSNamesystem namesys = cluster.getNamesystem();
+    try {
+
+      //
+      // check that / exists
+      //
+      Path path = new Path("/");
+      assertTrue("/ should be a directory", 
+                 fs.getFileStatus(path).isDir() == true);
+      currentNodes = 1;          // root inode
+
+      // verify that we can create the specified number of files. We leave
+      // one for the "/". Each file takes an inode and a block.
+      //
+      for (int i = 0; i < maxObjects/2; i++) {
+        Path file = new Path("/filestatus" + i);
+        createFile(fs, file);
+        System.out.println("Created file " + file);
+        currentNodes += 2;      // two more objects for this creation.
+      }
+
+      // verify that creating another file fails
+      boolean hitException = false;
+      try {
+        Path file = new Path("/filestatus");
+        createFile(fs, file);
+        System.out.println("Created file " + file);
+      } catch (IOException e) {
+        hitException = true;
+      }
+      assertTrue("Was able to exceed file limit", hitException);
+
+      // delete one file
+      Path file0 = new Path("/filestatus0");
+      fs.delete(file0, true);
+      System.out.println("Deleted file " + file0);
+      currentNodes -= 2;
+
+      // wait for number of blocks to decrease
+      waitForLimit(namesys, currentNodes);
+
+      // now, we should be able to create a new file
+      createFile(fs, file0);
+      System.out.println("Created file " + file0 + " again.");
+      currentNodes += 2;
+
+      // delete the file again
+      file0 = new Path("/filestatus0");
+      fs.delete(file0, true);
+      System.out.println("Deleted file " + file0 + " again.");
+      currentNodes -= 2;
+
+      // wait for number of blocks to decrease
+      waitForLimit(namesys, currentNodes);
+
+      // create two directories in place of the file that we deleted
+      Path dir = new Path("/dir0/dir1");
+      fs.mkdirs(dir);
+      System.out.println("Created directories " + dir);
+      currentNodes += 2;
+      waitForLimit(namesys, currentNodes);
+
+      // verify that creating another directory fails
+      hitException = false;
+      try {
+        fs.mkdirs(new Path("dir.fail"));
+        System.out.println("Created directory should not have succeeded.");
+      } catch (IOException e) {
+        hitException = true;
+      }
+      assertTrue("Was able to exceed dir limit", hitException);
+
+    } finally {
+      fs.close();
+      cluster.shutdown();
+    }
+  }
+
+  public void testFileLimitSimulated() throws IOException {
+    simulatedStorage = true;
+    testFileLimit();
+    simulatedStorage = false;
+  }
+}

+ 376 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -0,0 +1,376 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.io.RandomAccessFile;
+import java.net.InetSocketAddress;
+import java.nio.channels.FileChannel;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.tools.DFSck;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.util.ToolRunner;
+import org.apache.log4j.Level;
+
+/**
+ * A JUnit test for doing fsck
+ */
+public class TestFsck extends TestCase {
+  static String runFsck(Configuration conf, int expectedErrCode, 
+                        boolean checkErrorCode,String... path) 
+                        throws Exception {
+    PrintStream oldOut = System.out;
+    ByteArrayOutputStream bStream = new ByteArrayOutputStream();
+    PrintStream newOut = new PrintStream(bStream, true);
+    System.setOut(newOut);
+    ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.ALL);
+    int errCode = ToolRunner.run(new DFSck(conf), path);
+    if (checkErrorCode)
+      assertEquals(expectedErrCode, errCode);
+    ((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.INFO);
+    System.setOut(oldOut);
+    return bStream.toString();
+  }
+
+  /** do fsck */
+  public void testFsck() throws Exception {
+    DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
+    MiniDFSCluster cluster = null;
+    FileSystem fs = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setLong("dfs.blockreport.intervalMsec", 10000L);
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+      fs = cluster.getFileSystem();
+      util.createFiles(fs, "/srcdat");
+      util.waitReplication(fs, "/srcdat", (short)3);
+      String outStr = runFsck(conf, 0, true, "/");
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+      System.out.println(outStr);
+      if (fs != null) {try{fs.close();} catch(Exception e){}}
+      cluster.shutdown();
+      
+      // restart the cluster; bring up namenode but not the data nodes
+      cluster = new MiniDFSCluster(conf, 0, false, null);
+      outStr = runFsck(conf, 1, true, "/");
+      // expect the result is corrupt
+      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+      System.out.println(outStr);
+      
+      // bring up data nodes & cleanup cluster
+      cluster.startDataNodes(conf, 4, true, null, null);
+      cluster.waitActive();
+      cluster.waitClusterUp();
+      fs = cluster.getFileSystem();
+      util.cleanup(fs, "/srcdat");
+    } finally {
+      if (fs != null) {try{fs.close();} catch(Exception e){}}
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+
+  public void testFsckNonExistent() throws Exception {
+    DFSTestUtil util = new DFSTestUtil("TestFsck", 20, 3, 8*1024);
+    MiniDFSCluster cluster = null;
+    FileSystem fs = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setLong("dfs.blockreport.intervalMsec", 10000L);
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+      fs = cluster.getFileSystem();
+      util.createFiles(fs, "/srcdat");
+      util.waitReplication(fs, "/srcdat", (short)3);
+      String outStr = runFsck(conf, 0, true, "/non-existent");
+      assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
+      System.out.println(outStr);
+      util.cleanup(fs, "/srcdat");
+    } finally {
+      if (fs != null) {try{fs.close();} catch(Exception e){}}
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+
+  /** Test fsck with permission set on inodes */
+  public void testFsckPermission() throws Exception {
+    final DFSTestUtil util = new DFSTestUtil(getClass().getSimpleName(), 20, 3, 8*1024);
+    final Configuration conf = new Configuration();
+    conf.setLong("dfs.blockreport.intervalMsec", 10000L);
+
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+
+      final FileSystem fs = cluster.getFileSystem();
+      final String dir = "/dfsck";
+      final Path dirpath = new Path(dir);
+      util.createFiles(fs, dir);
+      util.waitReplication(fs, dir, (short)3);
+      fs.setPermission(dirpath, new FsPermission((short)0700));
+
+      //run DFSck as another user
+      final Configuration c2 = DFSTestUtil.getConfigurationWithDifferentUsername(conf);
+      System.out.println(runFsck(c2, -1, true, dir));
+
+      //set permission and try DFSck again
+      fs.setPermission(dirpath, new FsPermission((short)0777));
+      final String outStr = runFsck(c2, 0, true, dir);
+      System.out.println(outStr);
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+
+      util.cleanup(fs, dir);
+    } finally {
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+
+  public void testFsckMove() throws Exception {
+    DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3, 8*1024);
+    MiniDFSCluster cluster = null;
+    FileSystem fs = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setLong("dfs.blockreport.intervalMsec", 10000L);
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+      String topDir = "/srcdat";
+      fs = cluster.getFileSystem();
+      cluster.waitActive();
+      util.createFiles(fs, topDir);
+      util.waitReplication(fs, topDir, (short)3);
+      String outStr = runFsck(conf, 0, true, "/");
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+      
+      // Corrupt a block by deleting it
+      String[] fileNames = util.getFileNames(topDir);
+      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                                          cluster.getNameNodePort()), conf);
+      String block = dfsClient.namenode.
+                      getBlockLocations(fileNames[0], 0, Long.MAX_VALUE).
+                      get(0).getBlock().getBlockName();
+      File baseDir = new File(System.getProperty("test.build.data",
+                                                 "build/test/data"),"dfs/data");
+      for (int i=0; i<8; i++) {
+        File blockFile = new File(baseDir, "data" +(i+1)+ "/current/" + block);
+        if(blockFile.exists()) {
+          assertTrue(blockFile.delete());
+        }
+      }
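+      // The 8 "data*" directories are assumed to cover the 4 datanodes started above
+      // (MiniDFSCluster gives each datanode two storage directories by default).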
+
+      // We expect the filesystem to be corrupted
+      outStr = runFsck(conf, 1, false, "/");
+      while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
+        try {
+          Thread.sleep(100);
+        } catch (InterruptedException ignore) {
+        }
+        outStr = runFsck(conf, 1, false, "/");
+      } 
+      
+      // Fix the filesystem by moving corrupted files to lost+found
+      outStr = runFsck(conf, 1, true, "/", "-move");
+      assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+      
+      // Check to make sure we have healthy filesystem
+      outStr = runFsck(conf, 0, true, "/");
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); 
+      util.cleanup(fs, topDir);
+      if (fs != null) {try{fs.close();} catch(Exception e){}}
+      cluster.shutdown();
+    } finally {
+      if (fs != null) {try{fs.close();} catch(Exception e){}}
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+  
+  public void testFsckOpenFiles() throws Exception {
+    DFSTestUtil util = new DFSTestUtil("TestFsck", 4, 3, 8*1024); 
+    MiniDFSCluster cluster = null;
+    FileSystem fs = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setLong("dfs.blockreport.intervalMsec", 10000L);
+      cluster = new MiniDFSCluster(conf, 4, true, null);
+      String topDir = "/srcdat";
+      String randomString = "HADOOP  ";
+      fs = cluster.getFileSystem();
+      cluster.waitActive();
+      util.createFiles(fs, topDir);
+      util.waitReplication(fs, topDir, (short)3);
+      String outStr = runFsck(conf, 0, true, "/");
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+      // Open a file for writing and do not close for now
+      Path openFile = new Path(topDir + "/openFile");
+      FSDataOutputStream out = fs.create(openFile);
+      int writeCount = 0;
+      while (writeCount != 100) {
+        out.write(randomString.getBytes());
+        writeCount++;                  
+      }
+      // We expect the filesystem to be HEALTHY and show one open file
+      outStr = runFsck(conf, 0, true, topDir);
+      System.out.println(outStr);
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+      assertFalse(outStr.contains("OPENFORWRITE")); 
+      // Use -openforwrite option to list open files
+      outStr = runFsck(conf, 0, true, topDir, "-openforwrite");
+      System.out.println(outStr);
+      assertTrue(outStr.contains("OPENFORWRITE"));
+      assertTrue(outStr.contains("openFile"));
+      // Close the file
+      out.close(); 
+      // Now, fsck should show HEALTHY fs and should not show any open files
+      outStr = runFsck(conf, 0, true, topDir);
+      System.out.println(outStr);
+      assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+      assertFalse(outStr.contains("OPENFORWRITE"));
+      util.cleanup(fs, topDir);
+      if (fs != null) {try{fs.close();} catch(Exception e){}}
+      cluster.shutdown();
+    } finally {
+      if (fs != null) {try{fs.close();} catch(Exception e){}}
+      if (cluster != null) { cluster.shutdown(); }
+    }
+  }
+
+  public void testCorruptBlock() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setLong("dfs.blockreport.intervalMsec", 1000);
+    FileSystem fs = null;
+    DFSClient dfsClient = null;
+    LocatedBlocks blocks = null;
+    int replicaCount = 0;
+    Random random = new Random();
+    String outStr = null;
+
+    MiniDFSCluster cluster = null;
+    try {
+    cluster = new MiniDFSCluster(conf, 3, true, null);
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+    Path file1 = new Path("/testCorruptBlock");
+    DFSTestUtil.createFile(fs, file1, 1024, (short)3, 0);
+    // Wait until file replication has completed
+    DFSTestUtil.waitReplication(fs, file1, (short)3);
+    String block = DFSTestUtil.getFirstBlock(fs, file1).getBlockName();
+
+    // Make sure filesystem is in healthy state
+    outStr = runFsck(conf, 0, true, "/");
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
+    
+    // corrupt replicas 
+    File baseDir = new File(System.getProperty("test.build.data",
+                                               "build/test/data"),"dfs/data");
+    for (int i=0; i < 6; i++) {
+      File blockFile = new File(baseDir, "data" + (i+1) + "/current/" +
+                                block);
+      if (blockFile.exists()) {
+        RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
+        FileChannel channel = raFile.getChannel();
+        String badString = "BADBAD";
+        int rand = random.nextInt((int)channel.size()/2);
+        raFile.seek(rand);
+        raFile.write(badString.getBytes());
+        raFile.close();
+      }
+    }
+    // Read the file to trigger reportBadBlocks
+    try {
+      IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
+                        true);
+    } catch (IOException ie) {
+      // Ignore exception
+    }
+
+    dfsClient = new DFSClient(new InetSocketAddress("localhost",
+                               cluster.getNameNodePort()), conf);
+    blocks = dfsClient.namenode.
+               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+    replicaCount = blocks.get(0).getLocations().length;
+    while (replicaCount != 3) {
+      try {
+        Thread.sleep(100);
+      } catch (InterruptedException ignore) {
+      }
+      blocks = dfsClient.namenode.
+                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
+      replicaCount = blocks.get(0).getLocations().length;
+    }
+    assertTrue (blocks.get(0).isCorrupt());
+
+    // Check if fsck reports the same
+    outStr = runFsck(conf, 1, true, "/");
+    System.out.println(outStr);
+    assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
+    assertTrue(outStr.contains("testCorruptBlock"));
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+  
+  /** Test if fsck can return -1 in case of failure
+   * 
+   * @throws Exception
+   */
+  public void testFsckError() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      // bring up a one-node cluster
+      Configuration conf = new Configuration();
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      String fileName = "/test.txt";
+      Path filePath = new Path(fileName);
+      FileSystem fs = cluster.getFileSystem();
+      
+      // create a one-block file
+      DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
+      DFSTestUtil.waitReplication(fs, filePath, (short)1);
+      
+      // intentionally corrupt NN data structure
+      INodeFile node = (INodeFile)cluster.getNamesystem().dir.rootDir.getNode(fileName);
+      assertEquals(node.blocks.length, 1);
+      node.blocks[0].setNumBytes(-1L);  // set the block length to be negative
+      
+      // run fsck and expect a failure with -1 as the error code
+      String outStr = runFsck(conf, -1, true, fileName);
+      System.out.println(outStr);
+      assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
+      
+      // clean up file system
+      fs.delete(filePath, true);
+    } finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+}

+ 88 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java

@@ -0,0 +1,88 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.ArrayList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+
+import junit.framework.TestCase;
+
+/**
+ * Test if FSNamesystem handles heartbeat right
+ */
+public class TestHeartbeatHandling extends TestCase {
+  /**
+   * Test if {@link FSNamesystem#handleHeartbeat(DatanodeRegistration, long, long, long, int, int)}
+   * can pick up replication and/or invalidate requests and 
+   * observes the max limit
+   */
+  public void testHeartbeat() throws Exception {
+    final Configuration conf = new Configuration();
+    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    try {
+      cluster.waitActive();
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      final DatanodeRegistration nodeReg = cluster.getDataNodes().get(0).dnRegistration;
+      DatanodeDescriptor dd = namesystem.getDatanode(nodeReg);
+      
+      final int REMAINING_BLOCKS = 1;
+      final int MAX_REPLICATE_LIMIT = conf.getInt("dfs.max-repl-streams", 2);
+      final int MAX_INVALIDATE_LIMIT = FSNamesystem.BLOCK_INVALIDATE_CHUNK;
+      final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
+      final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
+      final DatanodeDescriptor[] ONE_TARGET = new DatanodeDescriptor[1];
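+      // Queue more work than fits in a single heartbeat reply: each reply is expected to
+      // carry at most MAX_REPLICATE_LIMIT replication targets and MAX_INVALIDATE_LIMIT
+      // invalidation targets, so the queues drain over several heartbeats (asserted below).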
+
+      synchronized (namesystem.heartbeats) {
+      for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
+        dd.addBlockToBeReplicated(
+            new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP), ONE_TARGET);
+      }
+      DatanodeCommand[] cmds = namesystem.handleHeartbeat(
+          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
+      assertEquals(1, cmds.length);
+      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
+      assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
+      
+      ArrayList<Block> blockList = new ArrayList<Block>(MAX_INVALIDATE_BLOCKS);
+      for (int i=0; i<MAX_INVALIDATE_BLOCKS; i++) {
+        blockList.add(new Block(i, 0, GenerationStamp.FIRST_VALID_STAMP));
+      }
+      dd.addBlocksToBeInvalidated(blockList);
+           
+      cmds = namesystem.handleHeartbeat(
+          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
+      assertEquals(2, cmds.length);
+      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
+      assertEquals(MAX_REPLICATE_LIMIT, ((BlockCommand)cmds[0]).getBlocks().length);
+      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
+      assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
+      
+      cmds = namesystem.handleHeartbeat(
+          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
+      assertEquals(2, cmds.length);
+      assertEquals(DatanodeProtocol.DNA_TRANSFER, cmds[0].getAction());
+      assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
+      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[1].getAction());
+      assertEquals(MAX_INVALIDATE_LIMIT, ((BlockCommand)cmds[1]).getBlocks().length);
+      
+      cmds = namesystem.handleHeartbeat(
+          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
+      assertEquals(1, cmds.length);
+      assertEquals(DatanodeProtocol.DNA_INVALIDATE, cmds[0].getAction());
+      assertEquals(REMAINING_BLOCKS, ((BlockCommand)cmds[0]).getBlocks().length);
+
+      cmds = namesystem.handleHeartbeat(
+          nodeReg, dd.getCapacity(), dd.getDfsUsed(), dd.getRemaining(), 0, 0);
+      assertEquals(null, cmds);
+      }
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}

+ 97 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestHost2NodesMap.java

@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+
+import junit.framework.TestCase;
+
+public class TestHost2NodesMap extends TestCase {
+  static private Host2NodesMap map = new Host2NodesMap();
+  private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
+    new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
+  };
+  private final static DatanodeDescriptor NULL_NODE = null; 
+  private final static DatanodeDescriptor NODE = 
+    new DatanodeDescriptor(new DatanodeID("h3:5040"), "/d1/r4");
+
+  static {
+    for(DatanodeDescriptor node:dataNodes) {
+      map.add(node);
+    }
+    map.add(NULL_NODE);
+  }
+  
+  public void testContains() throws Exception {
+    for(int i=0; i<dataNodes.length; i++) {
+      assertTrue(map.contains(dataNodes[i]));
+    }
+    assertFalse(map.contains(NULL_NODE));
+    assertFalse(map.contains(NODE));
+  }
+
+  public void testGetDatanodeByHost() throws Exception {
+    assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
+    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
+    DatanodeDescriptor node = map.getDatanodeByHost("h3");
+    assertTrue(node==dataNodes[2] || node==dataNodes[3]);
+    assertTrue(null==map.getDatanodeByHost("h4"));
+  }
+
+  public void testGetDatanodeByName() throws Exception {
+    assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
+    assertTrue(map.getDatanodeByName("h1:5030")==null);
+    assertTrue(map.getDatanodeByName("h2:5020")==dataNodes[1]);
+    assertTrue(map.getDatanodeByName("h2:5030")==null);
+    assertTrue(map.getDatanodeByName("h3:5020")==dataNodes[2]);
+    assertTrue(map.getDatanodeByName("h3:5030")==dataNodes[3]);
+    assertTrue(map.getDatanodeByName("h3:5040")==null);
+    assertTrue(map.getDatanodeByName("h4")==null);
+    assertTrue(map.getDatanodeByName(null)==null);
+  }
+
+  public void testRemove() throws Exception {
+    assertFalse(map.remove(NODE));
+    
+    assertTrue(map.remove(dataNodes[0]));
+    assertTrue(map.getDatanodeByHost("h1")==null);
+    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
+    DatanodeDescriptor node = map.getDatanodeByHost("h3");
+    assertTrue(node==dataNodes[2] || node==dataNodes[3]);
+    assertTrue(null==map.getDatanodeByHost("h4"));
+    
+    assertTrue(map.remove(dataNodes[2]));
+    assertTrue(map.getDatanodeByHost("h1")==null);
+    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
+    assertTrue(map.getDatanodeByHost("h3")==dataNodes[3]);
+    
+    assertTrue(map.remove(dataNodes[3]));
+    assertTrue(map.getDatanodeByHost("h1")==null);
+    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
+    assertTrue(map.getDatanodeByHost("h3")==null);
+    
+    assertFalse(map.remove(NULL_NODE));
+    assertTrue(map.remove(dataNodes[1]));
+    assertFalse(map.remove(dataNodes[1]));
+  }
+
+}

+ 40 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java

@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.Arrays;
+
+import junit.framework.TestCase;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+
+public class TestNNThroughputBenchmark extends TestCase {
+
+  /**
+   * This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
+   */
+  public void testNNThroughput() throws Exception {
+    Configuration conf = new Configuration();
+    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
+    conf.set("dfs.http.address", "0.0.0.0:0");
+    NameNode.format(conf);
+    String[] args = new String[] {"-op", "all"};
+    NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
+  }
+}

+ 391 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java

@@ -0,0 +1,391 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import junit.framework.TestCase;
+import java.io.*;
+import java.util.Random;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+
+/**
+ * This class tests various combinations of dfs.name.dir 
+ * and dfs.name.edits.dir configurations.
+ */
+public class TestNameEditsConfigs extends TestCase {
+  static final long SEED = 0xDEADBEEFL;
+  static final int BLOCK_SIZE = 4096;
+  static final int FILE_SIZE = 8192;
+  static final int NUM_DATA_NODES = 3;
+  static final String FILE_IMAGE = "current/fsimage";
+  static final String FILE_EDITS = "current/edits";
+
+  short replication = 3;
+  private File base_dir = new File(
+      System.getProperty("test.build.data", "build/test/data"), "dfs/");
+
+  protected void setUp() throws java.lang.Exception {
+    if(base_dir.exists())
+      tearDown();
+  }
+
+  protected void tearDown() throws java.lang.Exception {
+    if (!FileUtil.fullyDelete(base_dir)) 
+      throw new IOException("Cannot remove directory " + base_dir);
+  }
+
+  private void writeFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    FSDataOutputStream stm = fileSys.create(name, true,
+                                            fileSys.getConf().getInt("io.file.buffer.size", 4096),
+                                            (short)repl, (long)BLOCK_SIZE);
+    byte[] buffer = new byte[FILE_SIZE];
+    Random rand = new Random(SEED);
+    rand.nextBytes(buffer);
+    stm.write(buffer);
+    stm.close();
+  }
+
+  void checkImageAndEditsFilesExistence(File dir, 
+                                        boolean imageMustExist,
+                                        boolean editsMustExist) {
+    assertTrue(imageMustExist == new File(dir, FILE_IMAGE).exists());
+    assertTrue(editsMustExist == new File(dir, FILE_EDITS).exists());
+  }
+
+  private void checkFile(FileSystem fileSys, Path name, int repl)
+    throws IOException {
+    assertTrue(fileSys.exists(name));
+    int replication = fileSys.getFileStatus(name).getReplication();
+    assertEquals("replication for " + name, repl, replication);
+    long size = fileSys.getContentSummary(name).getLength();
+    assertEquals("file size for " + name, size, (long)FILE_SIZE);
+  }
+
+  private void cleanupFile(FileSystem fileSys, Path name)
+    throws IOException {
+    assertTrue(fileSys.exists(name));
+    fileSys.delete(name, true);
+    assertTrue(!fileSys.exists(name));
+  }
+
+  SecondaryNameNode startSecondaryNameNode(Configuration conf
+                                          ) throws IOException {
+    conf.set("dfs.secondary.http.address", "0.0.0.0:0");
+    return new SecondaryNameNode(conf);
+  }
+
+  /**
+   * Test various configuration options of dfs.name.dir and dfs.name.edits.dir
+   * The test creates files and restarts cluster with different configs.
+   * 1. Starts cluster with shared name and edits dirs
+   * 2. Restarts cluster by adding additional (different) name and edits dirs
+   * 3. Restarts cluster after removing the shared name and edits dirs, so that it 
+   *    starts using the separate name and edits dirs
+   * 4. Restart cluster by adding shared directory again, but make sure we 
+   *    do not read any stale image or edits. 
+   * All along the test, we create and delete files at each restart to make
+   * sure we are reading proper edits and image.
+   */
+  public void testNameEditsConfigs() throws IOException {
+    Path file1 = new Path("TestNameEditsConfigs1");
+    Path file2 = new Path("TestNameEditsConfigs2");
+    Path file3 = new Path("TestNameEditsConfigs3");
+    MiniDFSCluster cluster = null;
+    SecondaryNameNode secondary = null;
+    Configuration conf = null;
+    FileSystem fileSys = null;
+    File newNameDir = new File(base_dir, "name");
+    File newEditsDir = new File(base_dir, "edits");
+    File nameAndEdits = new File(base_dir, "name_and_edits");
+    File checkpointNameDir = new File(base_dir, "secondname");
+    File checkpointEditsDir = new File(base_dir, "secondedits");
+    File checkpointNameAndEdits = new File(base_dir, "second_name_and_edits");
+    
+    // Start namenode with same dfs.name.dir and dfs.name.edits.dir
+    conf = new Configuration();
+    conf.set("dfs.name.dir", nameAndEdits.getPath());
+    conf.set("dfs.name.edits.dir", nameAndEdits.getPath());
+    conf.set("fs.checkpoint.dir", checkpointNameAndEdits.getPath());
+    conf.set("fs.checkpoint.edits.dir", checkpointNameAndEdits.getPath());
+    replication = (short)conf.getInt("dfs.replication", 3);
+    // Manage our own dfs directories
+    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, true, false, true, null,
+                                  null, null, null);
+    cluster.waitActive();
+    secondary = startSecondaryNameNode(conf);
+    fileSys = cluster.getFileSystem();
+
+    try {
+      assertTrue(!fileSys.exists(file1));
+      writeFile(fileSys, file1, replication);
+      checkFile(fileSys, file1, replication);
+      secondary.doCheckpoint();
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+      secondary.shutdown();
+    }
+
+    // Start namenode with additional dfs.name.dir and dfs.name.edits.dir
+    conf =  new Configuration();
+    assertTrue(newNameDir.mkdir());
+    assertTrue(newEditsDir.mkdir());
+
+    conf.set("dfs.name.dir", nameAndEdits.getPath() +
+              "," + newNameDir.getPath());
+    conf.set("dfs.name.edits.dir", nameAndEdits.getPath() + 
+             "," + newEditsDir.getPath());
+    conf.set("fs.checkpoint.dir", checkpointNameDir.getPath() +
+             "," + checkpointNameAndEdits.getPath());
+    conf.set("fs.checkpoint.edits.dir", checkpointEditsDir.getPath() +
+             "," + checkpointNameAndEdits.getPath());
+    replication = (short)conf.getInt("dfs.replication", 3);
+    // Manage our own dfs directories. Do not format.
+    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true, 
+                                  null, null, null, null);
+    cluster.waitActive();
+    secondary = startSecondaryNameNode(conf);
+    fileSys = cluster.getFileSystem();
+
+    try {
+      assertTrue(fileSys.exists(file1));
+      checkFile(fileSys, file1, replication);
+      cleanupFile(fileSys, file1);
+      writeFile(fileSys, file2, replication);
+      checkFile(fileSys, file2, replication);
+      secondary.doCheckpoint();
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+      secondary.shutdown();
+    }
+
+    checkImageAndEditsFilesExistence(nameAndEdits, true, true);
+    checkImageAndEditsFilesExistence(newNameDir, true, false);
+    checkImageAndEditsFilesExistence(newEditsDir, false, true);
+    checkImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
+    checkImageAndEditsFilesExistence(checkpointNameDir, true, false);
+    checkImageAndEditsFilesExistence(checkpointEditsDir, false, true);
+
+    // Now remove common directory both have and start namenode with 
+    // separate name and edits dirs
+    new File(nameAndEdits, FILE_EDITS).renameTo(
+        new File(newNameDir, FILE_EDITS));
+    new File(nameAndEdits, FILE_IMAGE).renameTo(
+        new File(newEditsDir, FILE_IMAGE));
+    new File(checkpointNameAndEdits, FILE_EDITS).renameTo(
+        new File(checkpointNameDir, FILE_EDITS));
+    new File(checkpointNameAndEdits, FILE_IMAGE).renameTo(
+        new File(checkpointEditsDir, FILE_IMAGE));
+    conf =  new Configuration();
+    conf.set("dfs.name.dir", newNameDir.getPath());
+    conf.set("dfs.name.edits.dir", newEditsDir.getPath());
+    conf.set("fs.checkpoint.dir", checkpointNameDir.getPath());
+    conf.set("fs.checkpoint.edits.dir", checkpointEditsDir.getPath());
+    replication = (short)conf.getInt("dfs.replication", 3);
+    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true,
+                                  null, null, null, null);
+    cluster.waitActive();
+    secondary = startSecondaryNameNode(conf);
+    fileSys = cluster.getFileSystem();
+
+    try {
+      assertTrue(!fileSys.exists(file1));
+      assertTrue(fileSys.exists(file2));
+      checkFile(fileSys, file2, replication);
+      cleanupFile(fileSys, file2);
+      writeFile(fileSys, file3, replication);
+      checkFile(fileSys, file3, replication);
+      secondary.doCheckpoint();
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+      secondary.shutdown();
+    }
+
+    checkImageAndEditsFilesExistence(newNameDir, true, false);
+    checkImageAndEditsFilesExistence(newEditsDir, false, true);
+    checkImageAndEditsFilesExistence(checkpointNameDir, true, false);
+    checkImageAndEditsFilesExistence(checkpointEditsDir, false, true);
+
+    // Add old name_and_edits dir. File system should not read image or edits
+    // from old dir
+    assertTrue(FileUtil.fullyDelete(new File(nameAndEdits, "current")));
+    assertTrue(FileUtil.fullyDelete(new File(checkpointNameAndEdits, "current")));
+    conf = new Configuration();
+    conf.set("dfs.name.dir", nameAndEdits.getPath() +
+              "," + newNameDir.getPath());
+    conf.set("dfs.name.edits.dir", nameAndEdits +
+              "," + newEditsDir.getPath());
+    conf.set("fs.checkpoint.dir", checkpointNameDir.getPath() +
+        "," + checkpointNameAndEdits.getPath());
+    conf.set("fs.checkpoint.edits.dir", checkpointEditsDir.getPath() +
+        "," + checkpointNameAndEdits.getPath());
+    replication = (short)conf.getInt("dfs.replication", 3);
+    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true,
+                                  null, null, null, null);
+    cluster.waitActive();
+    secondary = startSecondaryNameNode(conf);
+    fileSys = cluster.getFileSystem();
+
+    try {
+      assertTrue(!fileSys.exists(file1));
+      assertTrue(!fileSys.exists(file2));
+      assertTrue(fileSys.exists(file3));
+      checkFile(fileSys, file3, replication);
+      secondary.doCheckpoint();
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+      secondary.shutdown();
+    }
+    checkImageAndEditsFilesExistence(nameAndEdits, true, true);
+    checkImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
+  }
+
+  /**
+   * Test various configuration options of dfs.name.dir and dfs.name.edits.dir.
+   * This test simulates failure scenarios.
+   * 1. Starts the cluster with shared name and edits dirs
+   * 2. Restarts the cluster after adding separate name and edits dirs
+   * 3. Restarts the cluster after removing the shared name and edits dir
+   * 4. Restarts the cluster with the old shared name and edits dir, but only the
+   *    latest name dir. This should fail since we don't have the latest edits dir.
+   * 5. Restarts the cluster with the old shared name and edits dir, but only the
+   *    latest edits dir. This should fail since we don't have the latest name dir.
+   */
+  public void testNameEditsConfigsFailure() throws IOException {
+    Path file1 = new Path("TestNameEditsConfigs1");
+    Path file2 = new Path("TestNameEditsConfigs2");
+    Path file3 = new Path("TestNameEditsConfigs3");
+    MiniDFSCluster cluster = null;
+    Configuration conf = null;
+    FileSystem fileSys = null;
+    File newNameDir = new File(base_dir, "name");
+    File newEditsDir = new File(base_dir, "edits");
+    File nameAndEdits = new File(base_dir, "name_and_edits");
+    
+    // Start namenode with same dfs.name.dir and dfs.name.edits.dir
+    conf = new Configuration();
+    conf.set("dfs.name.dir", nameAndEdits.getPath());
+    conf.set("dfs.name.edits.dir", nameAndEdits.getPath());
+    replication = (short)conf.getInt("dfs.replication", 3);
+    // Manage our own dfs directories
+    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, true, false, true, null,
+                                  null, null, null);
+    cluster.waitActive();
+    fileSys = cluster.getFileSystem();
+
+    try {
+      assertTrue(!fileSys.exists(file1));
+      writeFile(fileSys, file1, replication);
+      checkFile(fileSys, file1, replication);
+    } finally  {
+      fileSys.close();
+      cluster.shutdown();
+    }
+
+    // Start namenode with additional dfs.name.dir and dfs.name.edits.dir
+    conf =  new Configuration();
+    assertTrue(newNameDir.mkdir());
+    assertTrue(newEditsDir.mkdir());
+
+    conf.set("dfs.name.dir", nameAndEdits.getPath() +
+              "," + newNameDir.getPath());
+    conf.set("dfs.name.edits.dir", nameAndEdits.getPath() +
+              "," + newEditsDir.getPath());
+    replication = (short)conf.getInt("dfs.replication", 3);
+    // Manage our own dfs directories. Do not format.
+    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true, 
+                                  null, null, null, null);
+    cluster.waitActive();
+    fileSys = cluster.getFileSystem();
+
+    try {
+      assertTrue(fileSys.exists(file1));
+      checkFile(fileSys, file1, replication);
+      cleanupFile(fileSys, file1);
+      writeFile(fileSys, file2, replication);
+      checkFile(fileSys, file2, replication);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+    
+    // Now remove common directory both have and start namenode with 
+    // separate name and edits dirs
+    conf =  new Configuration();
+    conf.set("dfs.name.dir", newNameDir.getPath());
+    conf.set("dfs.name.edits.dir", newEditsDir.getPath());
+    replication = (short)conf.getInt("dfs.replication", 3);
+    cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true,
+                                  null, null, null, null);
+    cluster.waitActive();
+    fileSys = cluster.getFileSystem();
+
+    try {
+      assertTrue(!fileSys.exists(file1));
+      assertTrue(fileSys.exists(file2));
+      checkFile(fileSys, file2, replication);
+      cleanupFile(fileSys, file2);
+      writeFile(fileSys, file3, replication);
+      checkFile(fileSys, file3, replication);
+    } finally {
+      fileSys.close();
+      cluster.shutdown();
+    }
+
+    // Add old shared directory for name and edits along with latest name
+    conf = new Configuration();
+    conf.set("dfs.name.dir", newNameDir.getPath() + "," + 
+             nameAndEdits.getPath());
+    conf.set("dfs.name.edits.dir", nameAndEdits.getPath());
+    replication = (short)conf.getInt("dfs.replication", 3);
+    try {
+      cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true,
+                                  null, null, null, null);
+      fail("Expected namenode startup to fail: the latest edits dir is missing");
+    } catch (IOException e) { // expect to fail
+      System.out.println("cluster start failed due to missing " +
+                         "latest edits dir");
+    } finally {
+      cluster = null;
+    }
+
+    // Add old shared directory for name and edits along with latest edits
+    conf = new Configuration();
+    conf.set("dfs.name.dir", nameAndEdits.getPath());
+    conf.set("dfs.name.edits.dir", newEditsDir.getPath() +
+             "," + nameAndEdits.getPath());
+    replication = (short)conf.getInt("dfs.replication", 3);
+    try {
+      cluster = new MiniDFSCluster(0, conf, NUM_DATA_NODES, false, false, true,
+                                   null, null, null, null);
+      fail("Expected namenode startup to fail: the latest name dir is missing");
+    } catch (IOException e) { // expect to fail
+      System.out.println("cluster start failed due to missing latest name dir");
+    } finally {
+      cluster = null;
+    }
+  }
+}
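
Note on the configuration exercised above: dfs.name.dir and dfs.name.edits.dir each accept a comma-separated list of directories, and the namenode keeps a redundant copy of the fsimage or edits log in every entry, which is exactly what checkImageAndEditsFilesExistence verifies. A minimal standalone sketch (not part of the patch; the /data/... paths and the NameDirsSketch class are hypothetical) of setting and reading back such lists:

  import org.apache.hadoop.conf.Configuration;

  public class NameDirsSketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Two redundant fsimage directories and two redundant edits directories
      // (hypothetical paths).
      conf.set("dfs.name.dir", "/data/1/dfs/name,/data/2/dfs/name");
      conf.set("dfs.name.edits.dir", "/data/1/dfs/edits,/data/2/dfs/edits");
      // Configuration.getStrings splits the comma-separated value, yielding
      // one entry per storage directory.
      for (String dir : conf.getStrings("dfs.name.dir")) {
        System.out.println("image dir: " + dir);
      }
      for (String dir : conf.getStrings("dfs.name.edits.dir")) {
        System.out.println("edits dir: " + dir);
      }
    }
  }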

+ 136 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java

@@ -0,0 +1,136 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+
+import java.io.File;
+import java.util.ArrayList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import junit.framework.TestCase;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+
+
+/**
+ * This tests the namenode's capacity and usage reporting for datanodes.
+ */
+public class TestNamenodeCapacityReport extends TestCase {
+  private static final Log LOG = LogFactory.getLog(TestNamenodeCapacityReport.class);
+
+  /**
+   * Verifies the capacity, usage and remaining-space figures reported for a
+   * datanode: the figures must be mutually consistent, and the reported
+   * capacity must exclude the reserved space (dfs.datanode.du.reserved).
+   */
+  public void testVolumeSize() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+
+    // Set aside a fixed amount of space on each volume as reserved
+    long reserved = 10000;
+    conf.setLong("dfs.datanode.du.reserved", reserved);
+    
+    try {
+      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster.waitActive();
+      
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      
+      // Ensure the data reported for each data node is right
+      ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
+      ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
+      namesystem.DFSNodesStatus(live, dead);
+      
+      assertTrue(live.size() == 1);
+      
+      long used, remaining, configCapacity, nonDFSUsed;
+      float percentUsed, percentRemaining;
+      
+      for (final DatanodeDescriptor datanode : live) {
+        used = datanode.getDfsUsed();
+        remaining = datanode.getRemaining();
+        nonDFSUsed = datanode.getNonDfsUsed();
+        configCapacity = datanode.getCapacity();
+        percentUsed = datanode.getDfsUsedPercent();
+        percentRemaining = datanode.getRemainingPercent();
+        
+        LOG.info("Datanode configCapacity " + configCapacity
+            + " used " + used + " non DFS used " + nonDFSUsed 
+            + " remaining " + remaining + " perentUsed " + percentUsed
+            + " percentRemaining " + percentRemaining);
+        
+        assertTrue(configCapacity == (used + remaining + nonDFSUsed));
+        assertTrue(percentUsed == ((100.0f * (float)used)/(float)configCapacity));
+        assertTrue(percentRemaining == ((100.0f * (float)remaining)/(float)configCapacity));
+      }   
+      
+      DF df = new DF(new File(cluster.getDataDirectory()), conf);
+     
+      //
+      // The data node in the MiniDFSCluster currently creates two data
+      // directories. Each data directory reports a capacity equal to the
+      // capacity of the disk it resides on, so the capacity reported by the
+      // data node is twice the disk capacity.
+      //
+      // To account for this, multiply the disk capacity and reserved space
+      // by two.
+      //
+      int numOfDataDirs = 2;
+      
+      long diskCapacity = numOfDataDirs * df.getCapacity();
+      reserved *= numOfDataDirs;
+      
+      configCapacity = namesystem.getCapacityTotal();
+      used = namesystem.getCapacityUsed();
+      nonDFSUsed = namesystem.getCapacityUsedNonDFS();
+      remaining = namesystem.getCapacityRemaining();
+      percentUsed = namesystem.getCapacityUsedPercent();
+      percentRemaining = namesystem.getCapacityRemainingPercent();
+      
+      LOG.info("Data node directory " + cluster.getDataDirectory());
+           
+      LOG.info("Name node diskCapacity " + diskCapacity + " configCapacity "
+          + configCapacity + " reserved " + reserved + " used " + used 
+          + " remaining " + remaining + " nonDFSUsed " + nonDFSUsed 
+          + " remaining " + remaining + " percentUsed " + percentUsed 
+          + " percentRemaining " + percentRemaining);
+      
+      // Ensure new total capacity reported excludes the reserved space
+      assertTrue(configCapacity == diskCapacity - reserved);
+      
+      // Ensure the reported capacity is the sum of used, remaining and non-DFS used space
+      assertTrue(configCapacity == (used + remaining + nonDFSUsed));
+
+      // Ensure percent used is calculated based on used and present capacity
+      assertTrue(percentUsed == ((float)used * 100.0f)/(float)configCapacity);
+
+      // Ensure percent remaining is calculated based on remaining and present capacity
+      assertTrue(percentRemaining == ((float)remaining * 100.0f)/(float)configCapacity);
+    }
+    finally {
+      if (cluster != null) {cluster.shutdown();}
+    }
+  }
+}
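
The assertions above boil down to a few arithmetic invariants on the reported figures. A minimal standalone sketch (not part of the patch; the numbers and the CapacityInvariantsSketch class are hypothetical) of the same arithmetic:

  public class CapacityInvariantsSketch {
    public static void main(String[] args) {
      // Hypothetical numbers for a single datanode with two data directories
      // on one 500000-byte disk and dfs.datanode.du.reserved = 10000.
      long diskCapacity = 2 * 500000L;   // disk capacity counted once per data dir
      long reserved     = 2 * 10000L;    // reserved space, also counted per data dir
      long used         = 120000L;       // DFS usage (hypothetical)
      long nonDfsUsed   = 80000L;        // non-DFS usage (hypothetical)

      // The reported capacity excludes the reserved space ...
      long configCapacity = diskCapacity - reserved;          // 980000
      // ... and is fully accounted for by used + remaining + non-DFS used.
      long remaining = configCapacity - used - nonDfsUsed;    // 780000

      float percentUsed      = 100.0f * used / configCapacity;
      float percentRemaining = 100.0f * remaining / configCapacity;
      System.out.println("configCapacity=" + configCapacity
          + " remaining=" + remaining
          + " percentUsed=" + percentUsed
          + " percentRemaining=" + percentRemaining);
    }
  }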

+ 107 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java

@@ -0,0 +1,107 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.util.Collection;
+import java.util.Iterator;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NumberReplicas;
+
+import junit.framework.TestCase;
+
+/**
+ * Test that the per-block live and excess replica counts are correct,
+ * so the NN makes the right decisions for under- and over-replicated blocks.
+ */
+public class TestNodeCount extends TestCase {
+  public void testNodeCount() throws Exception {
+    // start a mini dfs cluster of 2 nodes
+    final Configuration conf = new Configuration();
+    final short REPLICATION_FACTOR = (short)2;
+    final MiniDFSCluster cluster = 
+      new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
+    try {
+      final FSNamesystem namesystem = cluster.getNamesystem();
+      final FileSystem fs = cluster.getFileSystem();
+      
+      // populate the cluster with a one block file
+      final Path FILE_PATH = new Path("/testfile");
+      DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
+      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
+      Block block = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
+
+      // keep a copy of all datanode descriptors
+      DatanodeDescriptor[] datanodes = 
+         namesystem.heartbeats.toArray(new DatanodeDescriptor[REPLICATION_FACTOR]);
+      
+      // start two new nodes
+      cluster.startDataNodes(conf, 2, true, null, null);
+      cluster.waitActive();
+      
+      // bring down first datanode
+      DatanodeDescriptor datanode = datanodes[0];
+      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
+      // make sure that NN detects that the datanode is down
+      synchronized (namesystem.heartbeats) {
+        datanode.setLastUpdate(0); // mark it dead
+        namesystem.heartbeatCheck();
+      }
+      // the block will be replicated
+      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
+
+      // restart the first datanode
+      cluster.restartDataNode(dnprop);
+      cluster.waitActive();
+      
+      // check if excessive replica is detected
+      NumberReplicas num = null;
+      do {
+        synchronized (namesystem) {
+          num = namesystem.blockManager.countNodes(block);
+        }
+      } while (num.excessReplicas() == 0);
+      
+      // find out a non-excess node
+      Iterator<DatanodeDescriptor> iter = namesystem.blockManager.blocksMap.nodeIterator(block);
+      DatanodeDescriptor nonExcessDN = null;
+      while (iter.hasNext()) {
+        DatanodeDescriptor dn = iter.next();
+        Collection<Block> blocks = namesystem.blockManager.excessReplicateMap.get(dn.getStorageID());
+        if (blocks == null || !blocks.contains(block) ) {
+          nonExcessDN = dn;
+          break;
+        }
+      }
+      assertTrue(nonExcessDN!=null);
+      
+      // bring down non excessive datanode
+      dnprop = cluster.stopDataNode(nonExcessDN.getName());
+      // make sure that NN detects that the datanode is down
+      synchronized (namesystem.heartbeats) {
+        nonExcessDN.setLastUpdate(0); // mark it dead
+        namesystem.heartbeatCheck();
+      }
+      
+      // The block should be replicated
+      do {
+        num = namesystem.blockManager.countNodes(block);
+      } while (num.liveReplicas() != REPLICATION_FACTOR);
+      
+      // restart the first datanode
+      cluster.restartDataNode(dnprop);
+      cluster.waitActive();
+      
+      // wait until the excess replicas are detected again
+      do {
+        num = namesystem.blockManager.countNodes(block);
+      } while (num.excessReplicas() != 2);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+}
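
The do/while loops above poll countNodes() until the expected replica counts appear, with no upper bound on how long they may spin. A minimal sketch (not part of the patch; the Check interface, waitFor helper and timeout values are hypothetical) of the same wait expressed with a timeout and a short sleep between polls:

  public class WaitForSketch {
    /** Hypothetical predicate, polled until it holds. */
    interface Check { boolean satisfied(); }

    /** Poll the check every 100 ms, giving up after timeoutMillis. */
    static void waitFor(Check check, long timeoutMillis) throws InterruptedException {
      long deadline = System.currentTimeMillis() + timeoutMillis;
      while (!check.satisfied()) {
        if (System.currentTimeMillis() > deadline) {
          throw new IllegalStateException("condition not met within " + timeoutMillis + " ms");
        }
        Thread.sleep(100);   // back off instead of spinning
      }
    }

    public static void main(String[] args) throws InterruptedException {
      final long start = System.currentTimeMillis();
      // Example: wait until at least one second has elapsed.
      waitFor(new Check() {
        public boolean satisfied() { return System.currentTimeMillis() - start >= 1000; }
      }, 5000);
      System.out.println("condition met");
    }
  }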

Some files were not shown because too many files changed in this diff