
Merge branch 'trunk' into HDFS-7240

Xiaoyu Yao 7 years ago
parent
commit
070ad8438b
100 changed files with 1529 additions and 598 deletions
  1. +12 -3 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilitiesPolicy.java
  2. +1 -1 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java
  3. +72 -40 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
  4. +6 -4 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java
  5. +31 -0 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
  6. +38 -15 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java
  7. +35 -23 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
  8. +6 -9 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestDtUtilShell.java
  9. +65 -9 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  10. +20 -8 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  11. +15 -0 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  12. +46 -19 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPathHandle.java
  13. +24 -1 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  14. +0 -15 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
  15. +11 -0 hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
  16. +2 -0 hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
  17. +22 -0 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  18. +5 -3 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
  19. +5 -0 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
  20. +47 -0 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
  21. +2 -1 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java
  22. +4 -1 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java
  23. +24 -17 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
  24. +15 -8 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
  25. +2 -1 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  26. +1 -1 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  27. +26 -0 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
  28. +11 -5 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  29. +7 -7 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
  30. +19 -5 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  31. +1 -1 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
  32. +2 -1 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
  33. +1 -1 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
  34. +2 -0 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/robots.txt
  35. +1 -1 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
  36. +4 -0 hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
  37. +15 -5 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java
  38. +23 -0 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
  39. +55 -0 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java
  40. +0 -1 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
  41. +35 -25 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java
  42. +2 -3 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterDFSCluster.java
  43. +25 -9 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java
  44. +31 -0 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
  45. +143 -0 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java
  46. +4 -3 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
  47. +3 -1 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
  48. +5 -3 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
  49. +2 -1 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
  50. +1 -1 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
  51. +11 -3 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
  52. +3 -2 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java
  53. +1 -1 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
  54. +2 -2 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
  55. +10 -5 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
  56. +6 -6 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
  57. +1 -1 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
  58. +10 -3 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
  59. +35 -34 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java
  60. +1 -1 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/contract/hdfs.xml
  61. BIN hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored
  62. +378 -158 hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
  63. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java
  64. +4 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
  65. +4 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
  66. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
  67. +4 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fi/ProbabilityModel.java
  68. +5 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/AccumulatingReducer.java
  69. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/DFSCIOTest.java
  70. +4 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/DistributedFSCheck.java
  71. +4 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java
  72. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java
  73. +4 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestJHLA.java
  74. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java
  75. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/AppendOp.java
  76. +4 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ConfigExtractor.java
  77. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/CreateOp.java
  78. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/DeleteOp.java
  79. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ListOp.java
  80. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/MkdirOp.java
  81. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ReadOp.java
  82. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/RenameOp.java
  83. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ReportWriter.java
  84. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SleepOp.java
  85. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SliveMapper.java
  86. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SliveReducer.java
  87. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SliveTest.java
  88. +5 -5 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/TestSlive.java
  89. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/TruncateOp.java
  90. +4 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/WeightSelector.java
  91. +3 -4 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java
  92. +4 -4 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBenchWithoutMR.java
  93. +4 -4 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java
  94. +3 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java
  95. +8 -7 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRCluster.java
  96. +4 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRYarnClusterAdapter.java
  97. +7 -6 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java
  98. +4 -4 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBadRecords.java
  99. +4 -3 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
  100. +5 -6 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineFileInputFormat.java

+ 12 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/StreamCapabilitiesPolicy.java

@@ -22,6 +22,8 @@ import java.io.InputStream;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Static methods to implement policies for {@link StreamCapabilities}.
@@ -29,6 +31,10 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class StreamCapabilitiesPolicy {
+  public static final String CAN_UNBUFFER_NOT_IMPLEMENTED_MESSAGE =
+          "claims unbuffer capabilty but does not implement CanUnbuffer";
+  static final Logger LOG = LoggerFactory.getLogger(
+          StreamCapabilitiesPolicy.class);
   /**
    * Implement the policy for {@link CanUnbuffer#unbuffer()}.
    *
@@ -40,11 +46,14 @@ public class StreamCapabilitiesPolicy {
           && ((StreamCapabilities) in).hasCapability(
           StreamCapabilities.UNBUFFER)) {
         ((CanUnbuffer) in).unbuffer();
+      } else {
+        LOG.debug(in.getClass().getName() + ":"
+                + " does not implement StreamCapabilities"
+                + " and the unbuffer capability");
       }
     } catch (ClassCastException e) {
-      throw new UnsupportedOperationException("this stream " +
-          in.getClass().getName() +
-          " claims to unbuffer but forgets to implement CanUnbuffer");
+      throw new UnsupportedOperationException(in.getClass().getName() + ": "
+              + CAN_UNBUFFER_NOT_IMPLEMENTED_MESSAGE);
     }
   }
 }
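
For context, a minimal hedged sketch of how this policy is invoked; fs and path are assumed to come from the surrounding application, and the static helper name follows the class Javadoc above:

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.StreamCapabilitiesPolicy;

    FSDataInputStream in = fs.open(path);
    // Delegates to CanUnbuffer#unbuffer() only when the stream advertises
    // StreamCapabilities.UNBUFFER. After this change, streams without the
    // capability are logged at debug level rather than silently skipped,
    // while a stream that claims the capability without implementing
    // CanUnbuffer still fails with UnsupportedOperationException.
    StreamCapabilitiesPolicy.unbuffer(in);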

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ErasureCodeConstants.java

@@ -52,6 +52,6 @@ public final class ErasureCodeConstants {
 
   public static final byte MAX_POLICY_ID = Byte.MAX_VALUE;
   public static final byte USER_DEFINED_POLICY_START_ID = (byte) 64;
-  public static final byte REPLICATION_POLICY_ID = (byte) 63;
+  public static final byte REPLICATION_POLICY_ID = (byte) 0;
   public static final String REPLICATION_POLICY_NAME = REPLICATION_CODEC_NAME;
 }

+ 72 - 40
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java

@@ -27,7 +27,6 @@ import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
-import java.io.FileOutputStream;
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
@@ -60,6 +59,28 @@ import org.slf4j.LoggerFactory;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class Credentials implements Writable {
+  public enum SerializedFormat {
+    WRITABLE((byte) 0x00),
+    PROTOBUF((byte) 0x01);
+
+    // Caching to avoid reconstructing the array each time.
+    private static final SerializedFormat[] FORMATS = values();
+
+    final byte value;
+
+    SerializedFormat(byte val) {
+      this.value = val;
+    }
+
+    public static SerializedFormat valueOf(int val) {
+      try {
+        return FORMATS[val];
+      } catch (ArrayIndexOutOfBoundsException e) {
+        throw new IllegalArgumentException("Unknown credential format: " + val);
+      }
+    }
+  }
+
   private static final Logger LOG = LoggerFactory.getLogger(Credentials.class);
 
   private  Map<Text, byte[]> secretKeysMap = new HashMap<Text, byte[]>();
@@ -224,63 +245,74 @@ public class Credentials implements Writable {
     if (!Arrays.equals(magic, TOKEN_STORAGE_MAGIC)) {
       throw new IOException("Bad header found in token storage.");
     }
-    byte version = in.readByte();
-    if (version != TOKEN_STORAGE_VERSION &&
-        version != OLD_TOKEN_STORAGE_VERSION) {
-      throw new IOException("Unknown version " + version +
-                            " in token storage.");
+    SerializedFormat format;
+    try {
+      format = SerializedFormat.valueOf(in.readByte());
+    } catch (IllegalArgumentException e) {
+      throw new IOException(e);
     }
-    if (version == OLD_TOKEN_STORAGE_VERSION) {
+    switch (format) {
+    case WRITABLE:
       readFields(in);
-    } else if (version == TOKEN_STORAGE_VERSION) {
+      break;
+    case PROTOBUF:
       readProto(in);
+      break;
+    default:
+      throw new IOException("Unsupported format " + format);
     }
   }
 
   private static final byte[] TOKEN_STORAGE_MAGIC =
       "HDTS".getBytes(StandardCharsets.UTF_8);
-  private static final byte TOKEN_STORAGE_VERSION = 1;
-
-  /**
-   *  For backward compatibility.
-   */
-  private static final byte OLD_TOKEN_STORAGE_VERSION = 0;
-
 
   public void writeTokenStorageToStream(DataOutputStream os)
       throws IOException {
-    os.write(TOKEN_STORAGE_MAGIC);
-    os.write(TOKEN_STORAGE_VERSION);
-    writeProto(os);
+    // by default store in the oldest supported format for compatibility
+    writeTokenStorageToStream(os, SerializedFormat.WRITABLE);
   }
 
-  public void writeTokenStorageFile(Path filename,
-                                    Configuration conf) throws IOException {
-    FSDataOutputStream os = filename.getFileSystem(conf).create(filename);
-    writeTokenStorageToStream(os);
-    os.close();
+  public void writeTokenStorageToStream(DataOutputStream os,
+      SerializedFormat format) throws IOException {
+    switch (format) {
+    case WRITABLE:
+      writeWritableOutputStream(os);
+      break;
+    case PROTOBUF:
+      writeProtobufOutputStream(os);
+      break;
+    default:
+      throw new IllegalArgumentException("Unsupported serialized format: "
+          + format);
+    }
   }
 
-  /**
-   *  For backward compatibility.
-   */
-  public void writeLegacyTokenStorageLocalFile(File f) throws IOException {
-    writeLegacyOutputStream(new DataOutputStream(new FileOutputStream(f)));
+  private void writeWritableOutputStream(DataOutputStream os)
+      throws IOException {
+    os.write(TOKEN_STORAGE_MAGIC);
+    os.write(SerializedFormat.WRITABLE.value);
+    write(os);
   }
 
-  /**
-   *  For backward compatibility.
-   */
-  public void writeLegacyTokenStorageFile(Path filename, Configuration conf)
+  private void writeProtobufOutputStream(DataOutputStream os)
       throws IOException {
-    writeLegacyOutputStream(filename.getFileSystem(conf).create(filename));
+    os.write(TOKEN_STORAGE_MAGIC);
+    os.write(SerializedFormat.PROTOBUF.value);
+    writeProto(os);
   }
 
-  private void writeLegacyOutputStream(DataOutputStream os) throws IOException {
-    os.write(TOKEN_STORAGE_MAGIC);
-    os.write(OLD_TOKEN_STORAGE_VERSION);
-    write(os);
-    os.close();
+  public void writeTokenStorageFile(Path filename,
+                                    Configuration conf) throws IOException {
+    // by default store in the oldest supported format for compatibility
+    writeTokenStorageFile(filename, conf, SerializedFormat.WRITABLE);
+  }
+
+  public void writeTokenStorageFile(Path filename, Configuration conf,
+      SerializedFormat format) throws IOException {
+    try (FSDataOutputStream os =
+             filename.getFileSystem(conf).create(filename)) {
+      writeTokenStorageToStream(os, format);
+    }
   }
 
   /**
@@ -312,7 +344,7 @@ public class Credentials implements Writable {
    * @param out
    * @throws IOException
    */
-  public void writeProto(DataOutput out) throws IOException {
+  void writeProto(DataOutput out) throws IOException {
     CredentialsProto.Builder storage = CredentialsProto.newBuilder();
     for (Map.Entry<Text, Token<? extends TokenIdentifier>> e :
                                                          tokenMap.entrySet()) {
@@ -337,7 +369,7 @@ public class Credentials implements Writable {
    * Populates keys/values from proto buffer storage.
    * @param in - stream ready to read a serialized proto buffer message
    */
-  public void readProto(DataInput in) throws IOException {
+  void readProto(DataInput in) throws IOException {
     CredentialsProto storage = CredentialsProto.parseDelimitedFrom((DataInputStream)in);
     for (CredentialsKVProto kv : storage.getTokensList()) {
       addToken(new Text(kv.getAliasBytes().toByteArray()),
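
A short hedged sketch of the new format-dispatched Credentials API introduced above; the file name is illustrative, and the no-argument overloads default to WRITABLE for compatibility:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import org.apache.hadoop.security.Credentials;

    Credentials creds = new Credentials();
    try (DataOutputStream out =
             new DataOutputStream(new FileOutputStream("tokens.bin"))) {
      // explicit format selection; callers that omit the format get WRITABLE
      creds.writeTokenStorageToStream(out,
          Credentials.SerializedFormat.PROTOBUF);
    }
    Credentials roundTrip = new Credentials();
    try (DataInputStream in =
             new DataInputStream(new FileInputStream("tokens.bin"))) {
      // reads the "HDTS" magic, then dispatches on the format byte to
      // readFields() (WRITABLE) or readProto() (PROTOBUF)
      roundTrip.readTokenStorageStream(in);
    }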

+ 6 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/DtFileOperations.java

@@ -102,11 +102,13 @@ public final class DtFileOperations {
   public static void doFormattedWrite(
       File f, String format, Credentials creds, Configuration conf)
       throws IOException {
-    if (format == null || format.equals(FORMAT_PB)) {
-      creds.writeTokenStorageFile(fileToPath(f), conf);
-    } else { // if (format != null && format.equals(FORMAT_JAVA)) {
-      creds.writeLegacyTokenStorageLocalFile(f);
+    // default to oldest supported format for compatibility
+    Credentials.SerializedFormat credsFormat =
+        Credentials.SerializedFormat.WRITABLE;
+    if (format.equals(FORMAT_PB)) {
+      credsFormat = Credentials.SerializedFormat.PROTOBUF;
     }
+    creds.writeTokenStorageFile(fileToPath(f), conf, credsFormat);
   }
 
   /** Print out a Credentials file from the local filesystem.

+ 31 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java

@@ -1152,4 +1152,35 @@ public class StringUtils {
     return s1.equalsIgnoreCase(s2);
   }
 
+  /**
+   * <p>Checks if the String contains only unicode letters.</p>
+   *
+   * <p><code>null</code> will return <code>false</code>.
+   * An empty String (length()=0) will return <code>true</code>.</p>
+   *
+   * <pre>
+   * StringUtils.isAlpha(null)   = false
+   * StringUtils.isAlpha("")     = true
+   * StringUtils.isAlpha("  ")   = false
+   * StringUtils.isAlpha("abc")  = true
+   * StringUtils.isAlpha("ab2c") = false
+   * StringUtils.isAlpha("ab-c") = false
+   * </pre>
+   *
+   * @param str  the String to check, may be null
+   * @return <code>true</code> if only contains letters, and is non-null
+   */
+  public static boolean isAlpha(String str) {
+    if (str == null) {
+      return false;
+    }
+    int sz = str.length();
+    for (int i = 0; i < sz; i++) {
+      if (!Character.isLetter(str.charAt(i))) {
+        return false;
+      }
+    }
+    return true;
+  }
+
 }
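
The Javadoc examples above correspond directly to calls like the following:

    import org.apache.hadoop.util.StringUtils;

    StringUtils.isAlpha("hdfs");  // true
    StringUtils.isAlpha("r3");    // false: contains a digit
    StringUtils.isAlpha("");      // true: an empty string vacuously qualifies
    StringUtils.isAlpha(null);    // false by contract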

+ 38 - 15
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractOpenTest.java

@@ -218,14 +218,18 @@ public abstract class AbstractContractOpenTest
     Path path2 = path("testopenfilebyexact2");
     byte[] file1 = dataset(TEST_FILE_LEN, 43, 255);
     createFile(getFileSystem(), path1, false, file1);
-    FileStatus stat = getFileSystem().getFileStatus(path1);
-    assertNotNull(stat);
-    assertEquals(path1, stat.getPath());
+    FileStatus stat1 = getFileSystem().getFileStatus(path1);
+    assertNotNull(stat1);
+    assertEquals(path1, stat1.getPath());
     ContractTestUtils.rename(getFileSystem(), path1, path2);
+    FileStatus stat2 = getFileSystem().getFileStatus(path2);
+    assertNotNull(stat2);
+    assertEquals(path2, stat2.getPath());
     // create identical file at same location, orig still exists at path2
     createFile(getFileSystem(), path1, false, file1);
 
-    PathHandle fd = getHandleOrSkip(stat, HandleOpt.exact());
+    PathHandle fd1 = getHandleOrSkip(stat1, HandleOpt.exact());
+    PathHandle fd2 = getHandleOrSkip(stat2, HandleOpt.exact());
 
     // verify path1, path2 contents identical
     verifyFileContents(getFileSystem(), path1, file1);
@@ -235,11 +239,15 @@ public abstract class AbstractContractOpenTest
       // the original entity exists, it has not been modified, and an
       // identical file exists at the old path. The handle would also
       // fail to resolve if path1 had been modified
-      instream = getFileSystem().open(fd, 1 << 15);
+      instream = getFileSystem().open(fd1);
       fail("Expected an exception");
     } catch (IOException e) {
       // expected
     }
+
+    // verify unchanged resolves
+    instream = getFileSystem().open(fd2);
+    verifyRead(instream, file1, 0, TEST_FILE_LEN);
   }
 
   /**
@@ -265,7 +273,7 @@ public abstract class AbstractContractOpenTest
     // obtain handle to entity from #getFileStatus call
     PathHandle fd = getHandleOrSkip(stat, HandleOpt.content());
 
-    try (FSDataInputStream in = getFileSystem().open(fd, 1 << 15)) {
+    try (FSDataInputStream in = getFileSystem().open(fd)) {
       // verify read of consistent content at new location
       verifyRead(in, file1, 0, TEST_FILE_LEN);
     }
@@ -280,7 +288,7 @@ public abstract class AbstractContractOpenTest
 
     try {
       // handle should not resolve when content changed
-      instream = getFileSystem().open(fd, 1 << 15);
+      instream = getFileSystem().open(fd);
       fail("Failed to detect change to content");
     } catch (IOException e) {
       // expected
@@ -302,25 +310,40 @@ public abstract class AbstractContractOpenTest
 
     byte[] file1 = dataset(TEST_FILE_LEN, 43, 255);
     createFile(getFileSystem(), path1, false, file1);
-    FileStatus stat = getFileSystem().getFileStatus(path1);
-    assertNotNull(stat);
-    assertEquals(path1, stat.getPath());
+    FileStatus stat1 = getFileSystem().getFileStatus(path1);
+    assertNotNull(stat1);
+    assertEquals(path1, stat1.getPath());
     ContractTestUtils.rename(getFileSystem(), path1, path2);
+    FileStatus stat2 = getFileSystem().getFileStatus(path2);
+    assertNotNull(stat2);
+    assertEquals(path2, stat2.getPath());
     // create identical file at same location, orig still exists at path2
     createFile(getFileSystem(), path1, false, file1);
 
-    PathHandle fd = getHandleOrSkip(stat, HandleOpt.path());
+    PathHandle fd1 = getHandleOrSkip(stat1, HandleOpt.path());
+    PathHandle fd2 = getHandleOrSkip(stat2, HandleOpt.path());
 
     // verify path1, path2 contents identical
     verifyFileContents(getFileSystem(), path1, file1);
     verifyFileContents(getFileSystem(), path2, file1);
     try {
       // verify attempt to resolve the handle fails
-      instream = getFileSystem().open(fd, 1 << 15);
+      instream = getFileSystem().open(fd1);
       fail("Expected an exception");
     } catch (IOException e) {
       // expected
     }
+
+    // verify content change OK
+    byte[] file2a = dataset(TEST_FILE_LEN, 44, 255);
+    ContractTestUtils.appendFile(getFileSystem(), path2, file2a);
+    byte[] file2x = Arrays.copyOf(file1, file1.length + file2a.length);
+    System.arraycopy(file2a, 0, file2x, file1.length, file2a.length);
+    // verify path2 contains contents of orig + appended bytes
+    verifyFileContents(getFileSystem(), path2, file2x);
+    // verify open by fd succeeds
+    instream = getFileSystem().open(fd2);
+    verifyRead(instream, file2x, 0, 2 * TEST_FILE_LEN);
   }
 
   /**
@@ -357,8 +380,8 @@ public abstract class AbstractContractOpenTest
     verifyFileContents(getFileSystem(), path1, file2);
 
     // verify fd contains contents of file1 + appended bytes
-    instream = getFileSystem().open(fd, 1 << 15);
-    verifyRead(instream, file1x, 0, TEST_FILE_LEN);
+    instream = getFileSystem().open(fd);
+    verifyRead(instream, file1x, 0, 2 * TEST_FILE_LEN);
   }
 
   /**
@@ -388,7 +411,7 @@ public abstract class AbstractContractOpenTest
     ByteBuffer sb = fd.bytes();
     PathHandle fdb = new RawPathHandle(sb);
 
-    instream = getFileSystem().open(fdb, 1 << 15);
+    instream = getFileSystem().open(fdb);
     // verify stat contains contents of file1
     verifyRead(instream, file1, 0, TEST_FILE_LEN);
     // verify path2 contains contents of file1
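
Outside the contract-test helpers, the public API being exercised here is FileSystem#getPathHandle plus the open(PathHandle) overloads; a hedged sketch, with fs an assumed FileSystem instance and the path illustrative:

    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Options.HandleOpt;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.PathHandle;

    FileStatus st = fs.getFileStatus(new Path("/data/part-0000"));
    // exact(): the handle stops resolving if the file is moved or modified
    PathHandle fd = fs.getPathHandle(st, HandleOpt.exact());
    try (FSDataInputStream in = fs.open(fd)) {
      // reads the same entity the handle was created from, or the open
      // fails with an IOException, as the assertions above verify
    }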

+ 35 - 23
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java

@@ -139,38 +139,50 @@ public class TestClusterTopology extends Assert {
     NodeElement node4 = getNewNode("node4", "/d1/r3");
     cluster.add(node4);
 
+    // Number of test runs
+    int numTestRuns = 3;
+    int chiSquareTestRejectedCounter = 0;
+
     // Number of iterations to do the test
     int numIterations = 100;
 
-    // Pick random nodes
-    HashMap<String,Integer> histogram = new HashMap<String,Integer>();
-    for (int i=0; i<numIterations; i++) {
-      String randomNode = cluster.chooseRandom(NodeBase.ROOT).getName();
-      if (!histogram.containsKey(randomNode)) {
-        histogram.put(randomNode, 0);
+    for (int testRun = 0; testRun < numTestRuns; ++testRun) {
+
+      // Pick random nodes
+      HashMap<String, Integer> histogram = new HashMap<String, Integer>();
+      for (int i = 0; i < numIterations; i++) {
+        String randomNode = cluster.chooseRandom(NodeBase.ROOT).getName();
+        if (!histogram.containsKey(randomNode)) {
+          histogram.put(randomNode, 0);
+        }
+        histogram.put(randomNode, histogram.get(randomNode) + 1);
+      }
+      assertEquals("Random is not selecting all nodes", 4, histogram.size());
+
+      // Check with 99% confidence alpha=0.01 as confidence = 100 * (1 - alpha)
+      ChiSquareTest chiSquareTest = new ChiSquareTest();
+      double[] expected = new double[histogram.size()];
+      long[] observed = new long[histogram.size()];
+      int j = 0;
+      for (Integer occurrence : histogram.values()) {
+        expected[j] = 1.0 * numIterations / histogram.size();
+        observed[j] = occurrence;
+        j++;
+      }
+      boolean chiSquareTestRejected =
+            chiSquareTest.chiSquareTest(expected, observed, 0.01);
+
+      if (chiSquareTestRejected) {
+        ++chiSquareTestRejectedCounter;
       }
-      histogram.put(randomNode, histogram.get(randomNode) + 1);
-    }
-    assertEquals("Random is not selecting all nodes", 4, histogram.size());
-
-    // Check with 99% confidence (alpha=0.01 as confidence = (100 * (1 - alpha)
-    ChiSquareTest chiSquareTest = new ChiSquareTest();
-    double[] expected = new double[histogram.size()];
-    long[] observed = new long[histogram.size()];
-    int j=0;
-    for (Integer occurrence : histogram.values()) {
-      expected[j] = 1.0 * numIterations / histogram.size();
-      observed[j] = occurrence;
-      j++;
     }
-    boolean chiSquareTestRejected =
-        chiSquareTest.chiSquareTest(expected, observed, 0.01);
 
     // Check that they have the proper distribution
-    assertFalse("Not choosing nodes randomly", chiSquareTestRejected);
+    assertFalse("Random not choosing nodes with proper distribution",
+            chiSquareTestRejectedCounter==3);
 
     // Pick random nodes excluding the 2 nodes in /d1/r3
-    histogram = new HashMap<String,Integer>();
+    HashMap<String, Integer> histogram = new HashMap<String, Integer>();
     for (int i=0; i<numIterations; i++) {
       String randomNode = cluster.chooseRandom("~/d1/r3").getName();
       if (!histogram.containsKey(randomNode)) {
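
The restructuring above is a common way to de-flake a statistical assertion: a chi-square test at alpha = 0.01 rejects a genuinely uniform sampler about once per hundred runs, so the test now fails only if all three independent runs reject. Schematically (the sampling helper is hypothetical):

    import java.util.Arrays;
    import org.apache.commons.math3.stat.inference.ChiSquareTest;

    ChiSquareTest chiSquareTest = new ChiSquareTest();
    int rejected = 0;
    for (int run = 0; run < 3; run++) {
      long[] observed = sampleHistogram();   // hypothetical sampling helper
      double[] expected = new double[observed.length];
      Arrays.fill(expected, 100.0 / observed.length);
      if (chiSquareTest.chiSquareTest(expected, observed, 0.01)) {
        rejected++;
      }
    }
    // under truly uniform sampling, three false rejections occur with
    // probability on the order of 0.01^3
    assertFalse("Random not choosing nodes with proper distribution",
        rejected == 3);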

+ 6 - 9
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestDtUtilShell.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.security.token;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
 import java.io.FileInputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 
@@ -29,14 +28,12 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.Credentials;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.security.token.DtFetcher;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -54,7 +51,6 @@ public class TestDtUtilShell {
   private static Configuration defaultConf = new Configuration();
   private static FileSystem localFs = null;
   private final String alias = "proxy_ip:1234";
-  private final String renewer = "yarn";
   private final String getUrl = SERVICE_GET.toString() + "://localhost:9000/";
   private final String getUrl2 = "http://localhost:9000/";
   public static Text SERVICE_GET = new Text("testTokenServiceGet");
@@ -111,11 +107,12 @@ public class TestDtUtilShell {
     Token<? extends TokenIdentifier> tok = (Token<? extends TokenIdentifier>)
         new Token(IDENTIFIER, PASSWORD, KIND, service);
     creds.addToken(tok.getService(), tok);
+    Credentials.SerializedFormat format =
+        Credentials.SerializedFormat.PROTOBUF;
     if (legacy) {
-      creds.writeLegacyTokenStorageLocalFile(new File(tokenPath.toString()));
-    } else {
-      creds.writeTokenStorageFile(tokenPath, defaultConf);
+      format = Credentials.SerializedFormat.WRITABLE;
     }
+    creds.writeTokenStorageFile(tokenPath, defaultConf, format);
   }
 
   @Test
@@ -284,6 +281,6 @@ public class TestDtUtilShell {
     DataInputStream in = new DataInputStream(
         new FileInputStream(tokenFilenameGet));
     spyCreds.readTokenStorageStream(in);
-    Mockito.verify(spyCreds).readProto(in);
+    Mockito.verify(spyCreds, Mockito.never()).readFields(in);
   }
 }

+ 65 - 9
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -125,6 +125,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -1015,16 +1017,46 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     //    Get block info from namenode
     try (TraceScope ignored = newPathTraceScope("newDFSInputStream", src)) {
       LocatedBlocks locatedBlocks = getLocatedBlocks(src, 0);
-      if (locatedBlocks != null) {
-        ErasureCodingPolicy ecPolicy = locatedBlocks.getErasureCodingPolicy();
-        if (ecPolicy != null) {
-          return new DFSStripedInputStream(this, src, verifyChecksum, ecPolicy,
-              locatedBlocks);
-        }
-        return new DFSInputStream(this, src, verifyChecksum, locatedBlocks);
-      } else {
-        throw new IOException("Cannot open filename " + src);
+      return openInternal(locatedBlocks, src, verifyChecksum);
+    }
+  }
+
+  /**
+   * Create an input stream from the {@link HdfsPathHandle} if the
+   * constraints encoded from {@link
+   * DistributedFileSystem#createPathHandle(FileStatus, Options.HandleOpt...)}
+   * are satisfied. Note that HDFS does not ensure that these constraints
+   * remain invariant for the life of the stream. It only checks that they
+   * still held when the stream was opened.
+   * @param fd Handle to an entity in HDFS, with constraints
+   * @param buffersize ignored
+   * @param verifyChecksum Verify checksums before returning data to client
+   * @return Data from the referent of the {@link HdfsPathHandle}.
+   * @throws IOException On I/O error
+   */
+  public DFSInputStream open(HdfsPathHandle fd, int buffersize,
+      boolean verifyChecksum) throws IOException {
+    checkOpen();
+    String src = fd.getPath();
+    try (TraceScope ignored = newPathTraceScope("newDFSInputStream", src)) {
+      HdfsLocatedFileStatus s = getLocatedFileInfo(src, true);
+      fd.verify(s); // check invariants in path handle
+      LocatedBlocks locatedBlocks = s.getLocatedBlocks();
+      return openInternal(locatedBlocks, src, verifyChecksum);
+    }
+  }
+
+  private DFSInputStream openInternal(LocatedBlocks locatedBlocks, String src,
+      boolean verifyChecksum) throws IOException {
+    if (locatedBlocks != null) {
+      ErasureCodingPolicy ecPolicy = locatedBlocks.getErasureCodingPolicy();
+      if (ecPolicy != null) {
+        return new DFSStripedInputStream(this, src, verifyChecksum, ecPolicy,
+            locatedBlocks);
       }
+      return new DFSInputStream(this, src, verifyChecksum, locatedBlocks);
+    } else {
+      throw new IOException("Cannot open filename " + src);
     }
   }
 
@@ -1647,6 +1679,30 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
   }
 
+  /**
+   * Get the file info for a specific file or directory.
+   * @param src The string representation of the path to the file
+   * @param needBlockToken Include block tokens in {@link LocatedBlocks}.
+   *        When block tokens are included, this call is a superset of
+   *        {@link #getBlockLocations(String, long)}.
+   * @return object containing information regarding the file
+   *         or null if file not found
+   *
+   * @see DFSClient#open(HdfsPathHandle, int, boolean)
+   * @see ClientProtocol#getFileInfo(String) for description of
+   *      exceptions
+   */
+  public HdfsLocatedFileStatus getLocatedFileInfo(String src,
+      boolean needBlockToken) throws IOException {
+    checkOpen();
+    try (TraceScope ignored = newPathTraceScope("getLocatedFileInfo", src)) {
+      return namenode.getLocatedFileInfo(src, needBlockToken);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          FileNotFoundException.class,
+          UnresolvedPathException.class);
+    }
+  }
   /**
    * Close status of a file
    * @return true if file is already closed

+ 20 - 8
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -115,7 +115,7 @@ import java.util.Collection;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
-import java.util.stream.Collectors;
+import java.util.Optional;
 
 /****************************************************************
  * Implementation of the abstract FileSystem for the DFS system.
@@ -340,11 +340,14 @@ public class DistributedFileSystem extends FileSystem
   @Override
   public FSDataInputStream open(PathHandle fd, int bufferSize)
       throws IOException {
+    statistics.incrementReadOps(1);
+    storageStatistics.incrementOpCounter(OpType.OPEN);
     if (!(fd instanceof HdfsPathHandle)) {
       fd = new HdfsPathHandle(fd.bytes());
     }
     HdfsPathHandle id = (HdfsPathHandle) fd;
-    return open(DFSUtilClient.makePathFromFileId(id.getInodeId()), bufferSize);
+    final DFSInputStream dfsis = dfs.open(id, bufferSize, verifyChecksum);
+    return dfs.createWrappedInputStream(dfsis);
   }
 
   /**
@@ -358,7 +361,7 @@ public class DistributedFileSystem extends FileSystem
    * @return A handle to the file.
    */
   @Override
-  protected PathHandle createPathHandle(FileStatus st, HandleOpt... opts) {
+  protected HdfsPathHandle createPathHandle(FileStatus st, HandleOpt... opts) {
     if (!(st instanceof HdfsFileStatus)) {
       throw new IllegalArgumentException("Invalid FileStatus "
           + st.getClass().getSimpleName());
@@ -373,12 +376,21 @@ public class DistributedFileSystem extends FileSystem
         .orElse(HandleOpt.changed(false));
     HandleOpt.Location loc = HandleOpt.getOpt(HandleOpt.Location.class, opts)
         .orElse(HandleOpt.moved(false));
-    if (!data.allowChange() || !loc.allowChange()) {
-      throw new UnsupportedOperationException("Unsupported opts "
-          + Arrays.stream(opts)
-                  .map(HandleOpt::toString).collect(Collectors.joining(",")));
+
+    HdfsFileStatus hst = (HdfsFileStatus) st;
+    final Path p;
+    final Optional<Long> inodeId;
+    if (loc.allowChange()) {
+      p = DFSUtilClient.makePathFromFileId(hst.getFileId());
+      inodeId = Optional.empty();
+    } else {
+      p = hst.getPath();
+      inodeId = Optional.of(hst.getFileId());
     }
-    return new HdfsPathHandle((HdfsFileStatus)st);
+    final Optional<Long> mtime = !data.allowChange()
+        ? Optional.of(hst.getModificationTime())
+        : Optional.empty();
+    return new HdfsPathHandle(getPathName(p), inodeId, mtime);
   }
 
   @Override
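
In summary, the HandleOpt combinations now determine what the handle encodes; a hedged reading of the logic above, with fs and st as in the earlier sketch:

    // moved(true):    path is the file-id based reserved path from
    //                 DFSUtilClient.makePathFromFileId, so renames are
    //                 transparent and no inodeId needs to be recorded
    // moved(false):   path is the literal path plus the inodeId, so a
    //                 different file at the same path fails verify()
    // changed(false): mtime is recorded in addition, so any modification
    //                 after handle creation fails verify()
    HdfsPathHandle fd =
        (HdfsPathHandle) fs.getPathHandle(st, HandleOpt.exact());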

+ 15 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -1024,6 +1024,21 @@ public interface ClientProtocol {
   @Idempotent
   HdfsFileStatus getFileLinkInfo(String src) throws IOException;
 
+  /**
+   * Get the file info for a specific file or directory with
+   * {@link LocatedBlocks}.
+   * @param src The string representation of the path to the file
+   * @param needBlockToken Generate block tokens for {@link LocatedBlocks}
+   * @return object containing information regarding the file
+   *         or null if file not found
+   * @throws org.apache.hadoop.security.AccessControlException permission denied
+   * @throws java.io.FileNotFoundException file <code>src</code> is not found
+   * @throws IOException If an I/O error occurred
+   */
+  @Idempotent
+  HdfsLocatedFileStatus getLocatedFileInfo(String src, boolean needBlockToken)
+      throws IOException;
+
   /**
    * Get {@link ContentSummary} rooted at the specified directory.
    * @param path The string representation of the path

+ 46 - 19
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPathHandle.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.Optional;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -34,16 +35,17 @@ import com.google.protobuf.ByteString;
 @InterfaceStability.Unstable
 public final class HdfsPathHandle implements PathHandle {
 
-  private static final long serialVersionUID = 0xc5308795428L;
+  private static final long serialVersionUID = 0xc53087a5428L;
 
-  private final long inodeId;
+  private final String path;
+  private final Long mtime;
+  private final Long inodeId;
 
-  public HdfsPathHandle(HdfsFileStatus hstat) {
-    this(hstat.getFileId());
-  }
-
-  public HdfsPathHandle(long inodeId) {
-    this.inodeId = inodeId;
+  public HdfsPathHandle(String path,
+      Optional<Long> inodeId, Optional<Long> mtime) {
+    this.path = path;
+    this.mtime = mtime.orElse(null);
+    this.inodeId = inodeId.orElse(null);
   }
 
   public HdfsPathHandle(ByteBuffer bytes) throws IOException {
@@ -52,20 +54,39 @@ public final class HdfsPathHandle implements PathHandle {
     }
     HdfsPathHandleProto p =
         HdfsPathHandleProto.parseFrom(ByteString.copyFrom(bytes));
-    inodeId = p.getInodeId();
+    path = p.getPath();
+    mtime = p.hasMtime()
+        ? p.getMtime()
+        : null;
+    inodeId = p.hasInodeId()
+        ? p.getInodeId()
+        : null;
   }
 
-  public long getInodeId() {
-    return inodeId;
+  public String getPath() {
+    return path;
+  }
+
+  public void verify(HdfsLocatedFileStatus stat) throws IOException {
+    if (mtime != null && mtime != stat.getModificationTime()) {
+      throw new IOException("Content changed");
+    }
+    if (inodeId != null && inodeId != stat.getFileId()) {
+      throw new IOException("Wrong file");
+    }
   }
 
   @Override
   public ByteBuffer bytes() {
-    return HdfsPathHandleProto.newBuilder()
-      .setInodeId(getInodeId())
-      .build()
-      .toByteString()
-      .asReadOnlyByteBuffer();
+    HdfsPathHandleProto.Builder b = HdfsPathHandleProto.newBuilder();
+    b.setPath(path);
+    if (inodeId != null) {
+      b.setInodeId(inodeId);
+    }
+    if (mtime != null) {
+      b.setMtime(mtime);
+    }
+    return b.build().toByteString().asReadOnlyByteBuffer();
   }
 
   @Override
@@ -78,19 +99,25 @@ public final class HdfsPathHandle implements PathHandle {
       return false;
     }
     HdfsPathHandle o = (HdfsPathHandle)other;
-    return getInodeId() == o.getInodeId();
+    return getPath().equals(o.getPath());
   }
 
   @Override
   public int hashCode() {
-    return Long.hashCode(inodeId);
+    return path.hashCode();
   }
 
   @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
     sb.append("{ ");
-    sb.append("inodeId : ").append(Long.toString(getInodeId()));
+    sb.append("\"path\" : \"").append(path).append("\"");
+    if (inodeId != null) {
+      sb.append(",\"inodeId\" : ").append(inodeId);
+    }
+    if (mtime != null) {
+      sb.append(",\"mtime\" : ").append(mtime);
+    }
     sb.append(" }");
     return sb.toString();
   }
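
Handles round-trip through their protobuf encoding; a minimal hedged sketch using the constructors above, with illustrative values:

    import java.nio.ByteBuffer;
    import java.util.Optional;
    import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;

    HdfsPathHandle fd = new HdfsPathHandle("/user/alice/data",
        Optional.of(16387L), Optional.of(1512000000000L));
    ByteBuffer raw = fd.bytes();                   // HdfsPathHandleProto bytes
    HdfsPathHandle copy = new HdfsPathHandle(raw); // throws IOException if malformed
    assert fd.equals(copy);                        // equality is by path only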

+ 24 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -71,6 +71,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -129,6 +130,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLin
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
@@ -872,7 +875,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
   @Override
   public HdfsFileStatus getFileInfo(String src) throws IOException {
     GetFileInfoRequestProto req = GetFileInfoRequestProto.newBuilder()
-        .setSrc(src).build();
+        .setSrc(src)
+        .build();
     try {
       GetFileInfoResponseProto res = rpcProxy.getFileInfo(null, req);
       return res.hasFs() ? PBHelperClient.convert(res.getFs()) : null;
@@ -881,6 +885,25 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
   }
 
+  @Override
+  public HdfsLocatedFileStatus getLocatedFileInfo(String src,
+      boolean needBlockToken) throws IOException {
+    GetLocatedFileInfoRequestProto req =
+        GetLocatedFileInfoRequestProto.newBuilder()
+            .setSrc(src)
+            .setNeedBlockToken(needBlockToken)
+            .build();
+    try {
+      GetLocatedFileInfoResponseProto res =
+          rpcProxy.getLocatedFileInfo(null, req);
+      return (HdfsLocatedFileStatus) (res.hasFs()
+          ? PBHelperClient.convert(res.getFs())
+          : null);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   @Override
   public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
     GetFileLinkInfoRequestProto req = GetFileLinkInfoRequestProto.newBuilder()

+ 0 - 15
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java

@@ -91,7 +91,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
-import org.apache.hadoop.hdfs.protocol.HdfsPathHandle;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
@@ -164,7 +163,6 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsPathHandleProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
@@ -1624,19 +1622,6 @@ public class PBHelperClient {
     return FsPermissionProto.newBuilder().setPerm(p.toShort()).build();
   }
 
-  public static HdfsPathHandle convert(HdfsPathHandleProto fd) {
-    if (null == fd) {
-      return null;
-    }
-    return new HdfsPathHandle(fd.getInodeId());
-  }
-
-  public static HdfsPathHandleProto convert(HdfsPathHandle fd) {
-    return HdfsPathHandleProto.newBuilder()
-        .setInodeId(fd.getInodeId())
-        .build();
-  }
-
   public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
     if (fs == null) {
       return null;

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto

@@ -495,6 +495,15 @@ message GetFileInfoResponseProto {
   optional HdfsFileStatusProto fs = 1;
 }
 
+message GetLocatedFileInfoRequestProto {
+  optional string src = 1;
+  optional bool needBlockToken = 2 [default = false];
+}
+
+message GetLocatedFileInfoResponseProto {
+  optional HdfsFileStatusProto fs = 1;
+}
+
 message IsFileClosedRequestProto {
   required string src = 1;
 }
@@ -868,6 +877,8 @@ service ClientNamenodeProtocol {
       returns(ListCorruptFileBlocksResponseProto);
   rpc metaSave(MetaSaveRequestProto) returns(MetaSaveResponseProto);
   rpc getFileInfo(GetFileInfoRequestProto) returns(GetFileInfoResponseProto);
+  rpc getLocatedFileInfo(GetLocatedFileInfoRequestProto)
+      returns(GetLocatedFileInfoResponseProto);
   rpc addCacheDirective(AddCacheDirectiveRequestProto)
       returns (AddCacheDirectiveResponseProto);
   rpc modifyCacheDirective(ModifyCacheDirectiveRequestProto)

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto

@@ -403,6 +403,8 @@ message AddErasureCodingPolicyResponseProto {
  */
 message HdfsPathHandleProto {
   optional uint64 inodeId = 1;
+  optional uint64 mtime = 2;
+  optional string path = 3;
 }
 
 /**
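
On the Java side, the generated builder mirrors the null-handling in HdfsPathHandle#bytes(); a hedged sketch with illustrative values:

    HdfsPathHandleProto p = HdfsPathHandleProto.newBuilder()
        .setPath("/user/alice/data")
        .setInodeId(16387L)
        .setMtime(1512000000000L)
        .build();
    // hasInodeId()/hasMtime() let HdfsPathHandle(ByteBuffer) distinguish an
    // absent constraint from a zero value when parsing the handle back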

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -136,6 +136,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLin
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLocatedFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto;
@@ -344,6 +346,10 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   private static final GetFileInfoResponseProto VOID_GETFILEINFO_RESPONSE = 
   GetFileInfoResponseProto.newBuilder().build();
 
+  private static final GetLocatedFileInfoResponseProto
+      VOID_GETLOCATEDFILEINFO_RESPONSE =
+          GetLocatedFileInfoResponseProto.newBuilder().build();
+
   private static final GetFileLinkInfoResponseProto VOID_GETFILELINKINFO_RESPONSE = 
   GetFileLinkInfoResponseProto.newBuilder().build();
 
@@ -952,7 +958,23 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
 
   @Override
+  public GetLocatedFileInfoResponseProto getLocatedFileInfo(
+      RpcController controller, GetLocatedFileInfoRequestProto req)
+      throws ServiceException {
+    try {
+      HdfsFileStatus result = server.getLocatedFileInfo(req.getSrc(),
+          req.getNeedBlockToken());
+      if (result != null) {
+        return GetLocatedFileInfoResponseProto.newBuilder().setFs(
+            PBHelperClient.convert(result)).build();
+      }
+      return VOID_GETLOCATEDFILEINFO_RESPONSE;
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 
+  @Override
   public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller,
       GetFileLinkInfoRequestProto req) throws ServiceException {
     try {

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java

@@ -293,10 +293,12 @@ class InvalidateBlocks {
       getBlocksToInvalidateByLimit(nodeToECBlocks.get(dn),
           toInvalidate, numECBlocks, remainingLimit);
     }
-    if (toInvalidate.size() > 0 && getBlockSetsSize(dn) == 0) {
-      remove(dn);
+    if (toInvalidate.size() > 0) {
+      if (getBlockSetsSize(dn) == 0) {
+        remove(dn);
+      }
+      dn.addBlocksToBeInvalidated(toInvalidate);
     }
-    dn.addBlocksToBeInvalidated(toInvalidate);
     return toInvalidate;
   }
   

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

@@ -126,6 +126,7 @@ class BlockReceiver implements Closeable {
 
   private boolean syncOnClose;
   private volatile boolean dirSyncOnFinalize;
+  private boolean dirSyncOnHSyncDone = false;
   private long restartBudget;
   /** the reference of the volume where the block receiver writes to */
   private ReplicaHandler replicaHandler;
@@ -424,6 +425,10 @@ class BlockReceiver implements Closeable {
       }
       flushTotalNanos += flushEndNanos - flushStartNanos;
     }
+    if (isSync && !dirSyncOnHSyncDone && replicaInfo instanceof LocalReplica) {
+      ((LocalReplica) replicaInfo).fsyncDirectory();
+      dirSyncOnHSyncDone = true;
+    }
     if (checksumOut != null || streams.getDataOut() != null) {
       datanode.metrics.addFlushNanos(flushTotalNanos);
       if (isSync) {

+ 47 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java

@@ -85,6 +85,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -103,8 +104,10 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo;
 import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation;
 import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
 import org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
@@ -1069,6 +1072,18 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
         locations, method, HdfsFileStatus.class, null);
   }
 
+  @Override
+  public HdfsLocatedFileStatus getLocatedFileInfo(String src,
+      boolean needBlockToken) throws IOException {
+    checkOperation(OperationCategory.READ);
+    final List<RemoteLocation> locations = getLocationsForPath(src, false);
+    RemoteMethod method = new RemoteMethod("getLocatedFileInfo",
+        new Class<?>[] {String.class, boolean.class}, new RemoteParam(),
+        Boolean.valueOf(needBlockToken));
+    return (HdfsLocatedFileStatus) rpcClient.invokeSequential(
+        locations, method, HdfsFileStatus.class, null);
+  }
+
   @Override // ClientProtocol
   public long[] getStats() throws IOException {
     checkOperation(OperationCategory.UNCHECKED);
@@ -1982,6 +1997,17 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
             this.subclusterResolver);
       }
 
+      // Block write operations when the path is in a read only mount point
+      if (opCategory.get() == OperationCategory.WRITE) {
+        // Check if the path is in a read only mount point
+        if (isPathReadOnly(path)) {
+          if (this.rpcMonitor != null) {
+            this.rpcMonitor.routerFailureReadOnly();
+          }
+          throw new IOException(path + " is in a read only mount point");
+        }
+      }
+
       return location.getDestinations();
     } catch (IOException ioe) {
       if (this.rpcMonitor != null) {
@@ -1991,6 +2017,27 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
     }
   }
 
+  /**
+   * Check if a path is in a read only mount point.
+   *
+   * @param path Path to check.
+   * @return True if the path is in a read only mount point.
+   */
+  private boolean isPathReadOnly(final String path) {
+    if (subclusterResolver instanceof MountTableResolver) {
+      try {
+        MountTableResolver mountTable = (MountTableResolver)subclusterResolver;
+        MountTable entry = mountTable.getMountPoint(path);
+        if (entry != null && entry.isReadOnly()) {
+          return true;
+        }
+      } catch (IOException e) {
+        LOG.error("Cannot get mount point: {}", e.getMessage());
+      }
+    }
+    return false;
+  }
+
   /**
    * Get the modification dates for mount points.
    *
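
The admin-side counterpart, mirroring the new tests further down; a minimal sketch of building such a read only entry (the nameservice and paths are illustrative):

    import java.io.IOException;
    import java.util.Collections;
    import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;

    public class ReadOnlyMountExample {
      static MountTable newReadOnlyEntry() throws IOException {
        // Map /readonly in the global namespace to /testdir on ns0 and mark
        // it read only; the Router then rejects writes beneath it with
        // "<path> is in a read only mount point".
        MountTable entry = MountTable.newInstance(
            "/readonly", Collections.singletonMap("ns0", "/testdir"));
        entry.setReadOnly(true);
        return entry;
      }
    }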

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java

@@ -148,7 +148,8 @@ final class FSDirAppendOp {
       fsd.writeUnlock();
     }
 
-    HdfsFileStatus stat = FSDirStatAndListingOp.getFileInfo(fsd, iip);
+    HdfsFileStatus stat =
+        FSDirStatAndListingOp.getFileInfo(fsd, iip, false, false);
     if (lb != null) {
       NameNode.stateChangeLog.debug(
           "DIR* NameSystem.appendFile: file {} for {} at {} block {} block"

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSnapshotOp.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReportListing;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
@@ -213,7 +214,9 @@ class FSDirSnapshotOp {
           snapname += Path.SEPARATOR;
         }
         snapname += file.substring(file.indexOf(dirName) + dirName.length());
-        if (fsd.getFSNamesystem().getFileInfo(snapname, true) != null) {
+        HdfsFileStatus stat =
+            fsd.getFSNamesystem().getFileInfo(snapname, true, false, false);
+        if (stat != null) {
           snaps.add(snapname);
         }
       }

+ 24 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java

@@ -90,11 +90,13 @@ class FSDirStatAndListingOp {
    * @param resolveLink whether to throw UnresolvedLinkException
    *        if src refers to a symlink
    *
+   * @param needLocation Include {@link LocatedBlocks} in result.
+   * @param needBlockToken Include block tokens in {@link LocatedBlocks}.
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  static HdfsFileStatus getFileInfo(
-      FSDirectory fsd, String srcArg, boolean resolveLink)
+  static HdfsFileStatus getFileInfo(FSDirectory fsd, String srcArg,
+      boolean resolveLink, boolean needLocation, boolean needBlockToken)
       throws IOException {
     DirOp dirOp = resolveLink ? DirOp.READ : DirOp.READ_LINK;
     FSPermissionChecker pc = fsd.getPermissionChecker();
@@ -111,7 +113,7 @@ class FSDirStatAndListingOp {
     } else {
       iip = fsd.resolvePath(pc, srcArg, dirOp);
     }
-    return getFileInfo(fsd, iip);
+    return getFileInfo(fsd, iip, needLocation, needBlockToken);
   }
 
   /**
@@ -234,7 +236,7 @@ class FSDirStatAndListingOp {
         // target INode
         return new DirectoryListing(
             new HdfsFileStatus[]{ createFileStatus(
-                fsd, iip, null, parentStoragePolicy, needLocation)
+                fsd, iip, null, parentStoragePolicy, needLocation, false)
             }, 0);
       }
 
@@ -253,8 +255,8 @@ class FSDirStatAndListingOp {
             ? getStoragePolicyID(child.getLocalStoragePolicyID(),
                                  parentStoragePolicy)
             : parentStoragePolicy;
-        listing[i] =
-            createFileStatus(fsd, iip, child, childStoragePolicy, needLocation);
+        listing[i] = createFileStatus(fsd, iip, child, childStoragePolicy,
+            needLocation, false);
         listingCnt++;
         if (listing[i] instanceof HdfsLocatedFileStatus) {
             // Once we  hit lsLimit locations, stop.
@@ -305,7 +307,7 @@ class FSDirStatAndListingOp {
     for (int i = 0; i < numOfListing; i++) {
       Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
       listing[i] = createFileStatus(fsd, iip, sRoot,
-          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
+          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false, false);
     }
     return new DirectoryListing(
         listing, snapshots.size() - skipSize - numOfListing);
@@ -324,11 +326,14 @@ class FSDirStatAndListingOp {
    * @param fsd FSDirectory
    * @param iip The path to the file, the file is included
    * @param includeStoragePolicy whether to include storage policy
+   * @param needLocation Include {@link LocatedBlocks} in response
+   * @param needBlockToken Generate block tokens for {@link LocatedBlocks}
    * @return object containing information regarding the file
    *         or null if file not found
    */
-  static HdfsFileStatus getFileInfo(FSDirectory fsd,
-      INodesInPath iip, boolean includeStoragePolicy) throws IOException {
+  static HdfsFileStatus getFileInfo(FSDirectory fsd, INodesInPath iip,
+      boolean includeStoragePolicy, boolean needLocation,
+      boolean needBlockToken) throws IOException {
     fsd.readLock();
     try {
       final INode node = iip.getLastINode();
@@ -338,14 +343,15 @@ class FSDirStatAndListingOp {
       byte policy = (includeStoragePolicy && !node.isSymlink())
           ? node.getStoragePolicyID()
           : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
-      return createFileStatus(fsd, iip, null, policy, false);
+      return createFileStatus(fsd, iip, null, policy, needLocation,
+          needBlockToken);
     } finally {
       fsd.readUnlock();
     }
   }
 
-  static HdfsFileStatus getFileInfo(FSDirectory fsd, INodesInPath iip)
-    throws IOException {
+  static HdfsFileStatus getFileInfo(FSDirectory fsd, INodesInPath iip,
+      boolean needLocation, boolean needBlockToken) throws IOException {
     fsd.readLock();
     try {
       HdfsFileStatus status = null;
@@ -356,7 +362,7 @@ class FSDirStatAndListingOp {
           status = FSDirectory.DOT_SNAPSHOT_DIR_STATUS;
         }
       } else {
-        status = getFileInfo(fsd, iip, true);
+        status = getFileInfo(fsd, iip, true, needLocation, needBlockToken);
       }
       return status;
     } finally {
@@ -373,7 +379,7 @@ class FSDirStatAndListingOp {
   static HdfsFileStatus createFileStatusForEditLog(
       FSDirectory fsd, INodesInPath iip) throws IOException {
     return createFileStatus(fsd, iip,
-        null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false);
+        null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, false, false);
   }
 
   /**
@@ -384,12 +390,13 @@ class FSDirStatAndListingOp {
    * @param child for a directory listing of the iip, else null
    * @param storagePolicy for the path or closest ancestor
    * @param needLocation if block locations need to be included or not
+   * @param needBlockToken if block tokens need to be generated for the
+   *          included block locations
    * @return a file status
    * @throws java.io.IOException if any error occurs
    */
   private static HdfsFileStatus createFileStatus(
       FSDirectory fsd, INodesInPath iip, INode child, byte storagePolicy,
-      boolean needLocation) throws IOException {
+      boolean needLocation, boolean needBlockToken) throws IOException {
     assert fsd.hasReadLock();
     // only directory listing sets the status name.
     byte[] name = HdfsFileStatus.EMPTY_NAME;
@@ -429,8 +436,8 @@ class FSDirStatAndListingOp {
         final long fileSize = !inSnapshot && isUc
             ? fileNode.computeFileSizeNotIncludingLastUcBlock() : size;
         loc = fsd.getBlockManager().createLocatedBlocks(
-            fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size, false,
-            inSnapshot, feInfo, ecPolicy);
+            fileNode.getBlocks(snapshot), fileSize, isUc, 0L, size,
+            needBlockToken, inSnapshot, feInfo, ecPolicy);
         if (loc == null) {
           loc = new LocatedBlocks();
         }

+ 15 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.ChunkedArrayList;
@@ -407,7 +408,7 @@ class FSDirWriteFileOp {
       NameNode.stateChangeLog.debug("DIR* NameSystem.startFile: added " +
           src + " inode " + newNode.getId() + " " + holder);
     }
-    return FSDirStatAndListingOp.getFileInfo(fsd, iip);
+    return FSDirStatAndListingOp.getFileInfo(fsd, iip, false, false);
   }
 
   static INodeFile addFileForEditLog(
@@ -415,22 +416,28 @@ class FSDirWriteFileOp {
       PermissionStatus permissions, List<AclEntry> aclEntries,
       List<XAttr> xAttrs, short replication, long modificationTime, long atime,
       long preferredBlockSize, boolean underConstruction, String clientName,
-      String clientMachine, byte storagePolicyId) {
+      String clientMachine, byte storagePolicyId, byte ecPolicyID) {
     final INodeFile newNode;
     Preconditions.checkNotNull(existing);
     assert fsd.hasWriteLock();
     try {
       // check if the file has an EC policy
-      boolean isStriped = false;
-      ErasureCodingPolicy ecPolicy = FSDirErasureCodingOp.
-          unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), existing);
-      if (ecPolicy != null) {
-        isStriped = true;
+      boolean isStriped =
+          ecPolicyID != ErasureCodeConstants.REPLICATION_POLICY_ID;
+      ErasureCodingPolicy ecPolicy = null;
+      if (isStriped) {
+        ecPolicy = fsd.getFSNamesystem().getErasureCodingPolicyManager()
+          .getByID(ecPolicyID);
+        if (ecPolicy == null) {
+          throw new IOException(String.format(
+              "Cannot find erasure coding policy for new file %s/%s, " +
+                  "ecPolicyID=%d",
+              existing.getPath(), Arrays.toString(localName), ecPolicyID));
+        }
       }
       final BlockType blockType = isStriped ?
           BlockType.STRIPED : BlockType.CONTIGUOUS;
       final Short replicationFactor = (!isStriped ? replication : null);
-      final Byte ecPolicyID = (isStriped ? ecPolicy.getId() : null);
       if (underConstruction) {
         newNode = newINodeFile(id, permissions, modificationTime,
             modificationTime, replicationFactor, ecPolicyID, preferredBlockSize,
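
The convention this hunk relies on: the edit log now always records an EC policy ID byte, with REPLICATION_POLICY_ID as the sentinel for plain replication. A one-method sketch of the dispatch, using only the constant imported above:

    import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;

    final class BlockLayoutFromPolicyId {
      static boolean isStriped(byte ecPolicyId) {
        // REPLICATION_POLICY_ID means "no EC policy, contiguous replication";
        // any other id selects a striped (erasure coded) block layout.
        return ecPolicyId != ErasureCodeConstants.REPLICATION_POLICY_ID;
      }
    }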

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -815,7 +815,8 @@ public class FSEditLog implements LogsPurgeable {
       .setClientMachine(
           newNode.getFileUnderConstructionFeature().getClientMachine())
       .setOverwrite(overwrite)
-      .setStoragePolicyId(newNode.getLocalStoragePolicyID());
+      .setStoragePolicyId(newNode.getLocalStoragePolicyID())
+      .setErasureCodingPolicyId(newNode.getErasureCodingPolicyID());
 
     AclFeature f = newNode.getAclFeature();
     if (f != null) {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -385,7 +385,7 @@ public class FSEditLogLoader {
             addCloseOp.xAttrs, replication, addCloseOp.mtime,
             addCloseOp.atime, addCloseOp.blockSize, true,
             addCloseOp.clientName, addCloseOp.clientMachine,
-            addCloseOp.storagePolicyId);
+            addCloseOp.storagePolicyId, addCloseOp.erasureCodingPolicyId);
         assert newFile != null;
         iip = INodesInPath.replace(iip, iip.length() - 1, newFile);
         fsNamesys.leaseManager.addLease(addCloseOp.clientName, newFile.getId());

+ 26 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -127,6 +127,7 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
 import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.ipc.ClientId;
 import org.apache.hadoop.ipc.RpcConstants;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
@@ -425,10 +426,12 @@ public abstract class FSEditLogOp {
     String clientMachine;
     boolean overwrite;
     byte storagePolicyId;
+    byte erasureCodingPolicyId;
     
     private AddCloseOp(FSEditLogOpCodes opCode) {
       super(opCode);
       storagePolicyId = HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
+      erasureCodingPolicyId = ErasureCodeConstants.REPLICATION_POLICY_ID;
       assert(opCode == OP_ADD || opCode == OP_CLOSE || opCode == OP_APPEND);
     }
 
@@ -449,6 +452,7 @@ public abstract class FSEditLogOp {
       clientMachine = null;
       overwrite = false;
       storagePolicyId = 0;
+      erasureCodingPolicyId = ErasureCodeConstants.REPLICATION_POLICY_ID;
     }
 
     <T extends AddCloseOp> T setInodeId(long inodeId) {
@@ -535,6 +539,11 @@ public abstract class FSEditLogOp {
       return (T)this;
     }
 
+    <T extends AddCloseOp> T setErasureCodingPolicyId(byte ecPolicyId) {
+      this.erasureCodingPolicyId = ecPolicyId;
+      return (T)this;
+    }
+
     @Override
     public void writeFields(DataOutputStream out) throws IOException {
       FSImageSerialization.writeLong(inodeId, out);
@@ -555,6 +564,7 @@ public abstract class FSEditLogOp {
         FSImageSerialization.writeString(clientMachine,out);
         FSImageSerialization.writeBoolean(overwrite, out);
         FSImageSerialization.writeByte(storagePolicyId, out);
+        FSImageSerialization.writeByte(erasureCodingPolicyId, out);
         // write clientId and callId
         writeRpcIds(rpcClientId, rpcCallId, out);
       }
@@ -633,6 +643,14 @@ public abstract class FSEditLogOp {
           this.storagePolicyId =
               HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
         }
+
+        if (NameNodeLayoutVersion.supports(
+            NameNodeLayoutVersion.Feature.ERASURE_CODING, logVersion)) {
+          this.erasureCodingPolicyId = FSImageSerialization.readByte(in);
+        } else {
+          this.erasureCodingPolicyId =
+              ErasureCodeConstants.REPLICATION_POLICY_ID;
+        }
         // read clientId and callId
         readRpcIds(in, logVersion);
       } else {
@@ -695,6 +713,8 @@ public abstract class FSEditLogOp {
       }
       builder.append(", storagePolicyId=");
       builder.append(storagePolicyId);
+      builder.append(", erasureCodingPolicyId=");
+      builder.append(erasureCodingPolicyId);
       builder.append(", opCode=");
       builder.append(opCode);
       builder.append(", txid=");
@@ -730,6 +750,8 @@ public abstract class FSEditLogOp {
         if (aclEntries != null) {
           appendAclEntriesToXml(contentHandler, aclEntries);
         }
+        XMLUtils.addSaxString(contentHandler, "ERASURE_CODING_POLICY_ID",
+            Byte.toString(erasureCodingPolicyId));
         appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
       }
     }
@@ -758,6 +780,10 @@ public abstract class FSEditLogOp {
       }
       this.permissions = permissionStatusFromXml(st);
       aclEntries = readAclEntriesFromXml(st);
+      if (st.hasChildren("ERASURE_CODING_POLICY_ID")) {
+        this.erasureCodingPolicyId = Byte.parseByte(st.getValue(
+            "ERASURE_CODING_POLICY_ID"));
+      }
       readRpcIdsFromXml(st);
     }
   }
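
After this change, an OP_ADD record dumped with the offline edits viewer carries the new element; a truncated, illustrative fragment (the tag name comes from the addSaxString call above, and the value assumes the RS(6,3) system policy id):

    <RECORD>
      <OPCODE>OP_ADD</OPCODE>
      <DATA>
        ...
        <ERASURE_CODING_POLICY_ID>1</ERASURE_CODING_POLICY_ID>
        ...
      </DATA>
    </RECORD>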

+ 11 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -2990,6 +2990,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @param resolveLink whether to throw UnresolvedLinkException
    *        if src refers to a symlink
    *
+   * @param needLocation Include {@link LocatedBlocks} in result.
+   * @param needBlockToken Include block tokens in {@link LocatedBlocks}
    * @throws AccessControlException if access is denied
    * @throws UnresolvedLinkException if a symlink is encountered.
    *
@@ -2997,15 +2999,19 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    *         or null if file not found
    * @throws StandbyException
    */
-  HdfsFileStatus getFileInfo(final String src, boolean resolveLink)
-    throws IOException {
-    final String operationName = "getfileinfo";
+  HdfsFileStatus getFileInfo(final String src, boolean resolveLink,
+      boolean needLocation, boolean needBlockToken) throws IOException {
+    // if the client requests block tokens, then it can read data blocks,
+    // so the call should appear in the audit log as if getBlockLocations
+    // had been called
+    final String operationName = needBlockToken ? "open" : "getfileinfo";
     checkOperation(OperationCategory.READ);
     HdfsFileStatus stat = null;
     readLock();
     try {
       checkOperation(OperationCategory.READ);
-      stat = FSDirStatAndListingOp.getFileInfo(dir, src, resolveLink);
+      stat = FSDirStatAndListingOp.getFileInfo(
+          dir, src, resolveLink, needLocation, needBlockToken);
     } catch (AccessControlException e) {
       logAuditEvent(false, operationName, src);
       throw e;
@@ -6158,7 +6164,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
 
     for (CorruptFileBlockInfo c : corruptFileBlocks) {
-      if (getFileInfo(c.path, true) != null) {
+      if (getFileInfo(c.path, true, false, false) != null) {
         list.add(c.toString());
       }
       final Collection<String> snaps = FSDirSnapshotOp

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -56,6 +56,7 @@ import org.apache.hadoop.hdfs.util.LongBitFormat;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.hadoop.io.erasurecode.ErasureCodeConstants;
 import org.apache.hadoop.util.StringUtils;
 
 /** I-node for closed file. */
@@ -200,9 +201,10 @@ public class INodeFile extends INodeWithAdditionalFields
         // as the PolicyID can never be in negative.
         layoutRedundancy |= erasureCodingPolicyID;
       } else {
-        Preconditions.checkArgument(replication != null &&
-            erasureCodingPolicyID == null);
-        Preconditions.checkArgument(replication >= 0 &&
+        Preconditions.checkArgument(erasureCodingPolicyID == null ||
+                erasureCodingPolicyID ==
+                    ErasureCodeConstants.REPLICATION_POLICY_ID);
+        Preconditions.checkArgument(replication != null && replication >= 0 &&
             replication <= MAX_REDUNDANCY,
             "Invalid replication value " + replication);
         layoutRedundancy |= replication;
@@ -588,10 +590,8 @@ public class INodeFile extends INodeWithAdditionalFields
     setStoragePolicyID(storagePolicyId);
   }
 
-
   /**
-   * @return The ID of the erasure coding policy on the file. -1 represents no
-   *          EC policy.
+   * @return The ID of the erasure coding policy on the file.
    */
   @VisibleForTesting
   @Override
@@ -599,7 +599,7 @@ public class INodeFile extends INodeWithAdditionalFields
     if (isStriped()) {
       return HeaderFormat.getECPolicyID(header);
     }
-    return -1;
+    return ErasureCodeConstants.REPLICATION_POLICY_ID;
   }
 
   /**

+ 19 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -104,6 +104,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicyInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSLimitException;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
@@ -1138,12 +1139,25 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   @Override // ClientProtocol
-  public HdfsFileStatus getFileInfo(String src)  throws IOException {
+  public HdfsFileStatus getFileInfo(String src) throws IOException {
     checkNNStartup();
     metrics.incrFileInfoOps();
-    return namesystem.getFileInfo(src, true);
+    return namesystem.getFileInfo(src, true, false, false);
   }
-  
+
+  @Override // ClientProtocol
+  public HdfsLocatedFileStatus getLocatedFileInfo(String src,
+      boolean needBlockToken) throws IOException {
+    checkNNStartup();
+    if (needBlockToken) {
+      metrics.incrGetBlockLocations();
+    } else {
+      metrics.incrFileInfoOps();
+    }
+    return (HdfsLocatedFileStatus)
+        namesystem.getFileInfo(src, true, true, needBlockToken);
+  }
+
   @Override // ClientProtocol
   public boolean isFileClosed(String src) throws IOException{
     checkNNStartup();
@@ -1154,7 +1168,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
     checkNNStartup();
     metrics.incrFileInfoOps();
-    return namesystem.getFileInfo(src, false);
+    return namesystem.getFileInfo(src, false, false, false);
   }
   
   @Override // ClientProtocol
@@ -1429,7 +1443,7 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     metrics.incrGetLinkTargetOps();
     HdfsFileStatus stat = null;
     try {
-      stat = namesystem.getFileInfo(path, false);
+      stat = namesystem.getFileInfo(path, false, false, false);
     } catch (UnresolvedPathException e) {
       return e.getResolvedPath().toString();
     } catch (UnresolvedLinkException e) {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

@@ -1236,7 +1236,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
                 ((float) (numUnderMinReplicatedBlocks * 100) / (float) totalBlocks))
                 .append(" %)");
           }
-          res.append("\n  ").append(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY + ":\t")
+          res.append("\n  ").append("MINIMAL BLOCK REPLICATION:\t")
              .append(minReplication);
         }
         if(corruptFiles>0) {

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java

@@ -182,7 +182,8 @@ public class DelegationTokenFetcher {
       Credentials cred = new Credentials();
       cred.addToken(token.getService(), token);
       // dtutil is replacing this tool; preserve legacy functionality
-      cred.writeLegacyTokenStorageFile(tokenFile, conf);
+      cred.writeTokenStorageFile(tokenFile, conf,
+          Credentials.SerializedFormat.WRITABLE);
 
       if (LOG.isDebugEnabled()) {
         LOG.debug("Fetched token " + fs.getUri() + " for " +

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java

@@ -77,7 +77,7 @@ public class RouterAdmin extends Configured implements Tool {
   public void printUsage() {
     String usage = "Federation Admin Tools:\n"
         + "\t[-add <source> <nameservice> <destination> "
-        + "[-readonly] [-order HASH|LOCAL|RANDOM|HASH_ALL]]\n"
+        + "[-readonly]\n"
         + "\t[-rm <source>]\n"
         + "\t[-ls <path>]\n";
     System.out.println(usage);

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/router/robots.txt

@@ -0,0 +1,2 @@
+User-agent: *
+Disallow: /

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md

@@ -425,7 +425,7 @@ Runs the DFS router. See [Router](./HDFSRouterFederation.html#Router) for more i
 Usage:
 
       hdfs dfsrouteradmin
-          [-add <source> <nameservice> <destination>]
+          [-add <source> <nameservice> <destination> [-readonly]]
           [-rm <source>]
           [-ls <path>]
 

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md

@@ -184,6 +184,10 @@ For example, to create three mount points and list them:
     [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /data/app2 ns3 /data/app2
     [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -ls
 
+It also supports mount points that disallow writes:
+
+    [hdfs]$ $HADOOP_HOME/bin/hdfs dfsrouteradmin -add /readonly ns1 / -readonly
+
 If a mount point is not set, the Router will map it to the default namespace `dfs.federation.router.default.nameserviceId`.
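
Writes beneath a read only mount point are rejected by the Router. An illustrative session (the error text comes from the RouterRpcServer change above; the exact shell output shape is assumed):

    [hdfs]$ $HADOOP_HOME/bin/hdfs dfs -mkdir /readonly/newdir
    mkdir: /readonly/newdir is in a read only mount point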
 
 

+ 15 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUnbuffer.java

@@ -131,17 +131,27 @@ public class TestUnbuffer {
   }
 
   /**
-   * Test unbuffer method which throws an Exception with class name included.
+   * Test that an InputStream throws an exception when it does not
+   * implement CanUnbuffer.
+   *
+   * The exception is expected when the stream claims to have the unbuffer
+   * capability but does not actually implement CanUnbuffer.
    */
   @Test
   public void testUnbufferException() {
-    FSInputStream in = Mockito.mock(FSInputStream.class);
-    FSDataInputStream fs = new FSDataInputStream(in);
+    abstract class BuggyStream
+            extends FSInputStream
+            implements StreamCapabilities {
+    }
+
+    BuggyStream bs = Mockito.mock(BuggyStream.class);
+    Mockito.when(bs.hasCapability(Mockito.anyString())).thenReturn(true);
 
     exception.expect(UnsupportedOperationException.class);
-    exception.expectMessage("this stream " + in.getClass().getName()
-        + " does not support unbuffering");
+    exception.expectMessage(
+            StreamCapabilitiesPolicy.CAN_UNBUFFER_NOT_IMPLEMENTED_MESSAGE);
 
+    FSDataInputStream fs = new FSDataInputStream(bs);
     fs.unbuffer();
   }
 }
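
The behavior under test is the dispatch added to StreamCapabilitiesPolicy in this merge; a sketch of its shape, assuming the UNBUFFER capability constant exists and assuming wording for the message constant the test references (not copied from the source):

    import java.io.InputStream;
    import org.apache.hadoop.fs.CanUnbuffer;
    import org.apache.hadoop.fs.StreamCapabilities;

    final class UnbufferPolicySketch {
      static final String CAN_UNBUFFER_NOT_IMPLEMENTED_MESSAGE =
          "claims to have the unbuffer capability but does not implement "
              + "CanUnbuffer";

      static void unbuffer(InputStream in) {
        if (in instanceof StreamCapabilities
            && ((StreamCapabilities) in).hasCapability(
                StreamCapabilities.UNBUFFER)) {
          if (in instanceof CanUnbuffer) {
            ((CanUnbuffer) in).unbuffer();
          } else {
            // The stream advertises a capability it cannot honor:
            // fail loudly instead of silently ignoring the request.
            throw new UnsupportedOperationException(in.getClass().getName()
                + ": " + CAN_UNBUFFER_NOT_IMPLEMENTED_MESSAGE);
          }
        }
      }
    }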

+ 23 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -1510,6 +1510,29 @@ public class DFSTestUtil {
     // OP_REMOVE_ERASURE_CODING_POLICY
     filesystem.removeErasureCodingPolicy(newPolicy1.getName());
     filesystem.removeErasureCodingPolicy(newPolicy2.getName());
+
+    // OP_ADD on erasure coding directory
+    Path ecDir = new Path("/ec");
+    filesystem.mkdirs(ecDir);
+    final ErasureCodingPolicy defaultEcPolicy =
+        SystemErasureCodingPolicies.getByID(
+            SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
+    final ErasureCodingPolicy ecPolicyRS32 =
+        SystemErasureCodingPolicies.getByID(
+            SystemErasureCodingPolicies.RS_3_2_POLICY_ID);
+    filesystem.enableErasureCodingPolicy(ecPolicyRS32.getName());
+    filesystem.enableErasureCodingPolicy(defaultEcPolicy.getName());
+    filesystem.setErasureCodingPolicy(ecDir, defaultEcPolicy.getName());
+
+    try (FSDataOutputStream out = filesystem.createFile(
+        new Path(ecDir, "replicated")).replicate().build()) {
+      out.write("replicated".getBytes());
+    }
+
+    try (FSDataOutputStream out = filesystem.createFile(
+        new Path(ecDir, "RS-3-2")).ecPolicyName(ecPolicyRS32.getName()).build()) {
+      out.write("RS-3-2".getBytes());
+    }
   }
 
   public static void abortStream(DFSOutputStream out) throws IOException {

+ 55 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystemWithECFile.java

@@ -19,11 +19,13 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.LocatedFileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -32,6 +34,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -194,4 +198,55 @@ public class TestDistributedFileSystemWithECFile {
     assertTrue(lastBlock.getOffset() == blockGroupSize);
     assertTrue(lastBlock.getLength() == lastBlockSize);
   }
+
+  @Test(timeout=60000)
+  public void testReplayEditLogsForReplicatedFile() throws Exception {
+    cluster.shutdown();
+
+    ErasureCodingPolicy rs63 = SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.RS_6_3_POLICY_ID
+    );
+    ErasureCodingPolicy rs32 = SystemErasureCodingPolicies.getByID(
+        SystemErasureCodingPolicies.RS_3_2_POLICY_ID
+    );
+    // Test RS(6,3) as default policy
+    int numDataNodes = rs63.getNumDataUnits() + rs63.getNumParityUnits();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .nnTopology(MiniDFSNNTopology.simpleHATopology())
+        .numDataNodes(numDataNodes)
+        .build();
+
+    cluster.transitionToActive(0);
+    fs = cluster.getFileSystem(0);
+    fs.enableErasureCodingPolicy(rs63.getName());
+    fs.enableErasureCodingPolicy(rs32.getName());
+
+    Path dir = new Path("/ec");
+    fs.mkdirs(dir);
+    fs.setErasureCodingPolicy(dir, rs63.getName());
+
+    // Create an erasure coded file with the default policy.
+    Path ecFile = new Path(dir, "ecFile");
+    createFile(ecFile.toString(), 10);
+    // Create a replicated file.
+    Path replicatedFile = new Path(dir, "replicated");
+    try (FSDataOutputStream out = fs.createFile(replicatedFile)
+      .replicate().build()) {
+      out.write(123);
+    }
+    // Create an EC file with a different policy.
+    Path ecFile2 = new Path(dir, "RS-3-2");
+    try (FSDataOutputStream out = fs.createFile(ecFile2)
+         .ecPolicyName(rs32.getName()).build()) {
+      out.write(456);
+    }
+
+    cluster.transitionToStandby(0);
+    cluster.transitionToActive(1);
+
+    fs = cluster.getFileSystem(1);
+    assertNull(fs.getErasureCodingPolicy(replicatedFile));
+    assertEquals(rs63, fs.getErasureCodingPolicy(ecFile));
+    assertEquals(rs32, fs.getErasureCodingPolicy(ecFile2));
+  }
 }

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 
 import com.google.common.base.Supplier;
-import org.apache.commons.lang.UnhandledException;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 
 import static org.apache.hadoop.fs.CreateFlag.CREATE;

+ 35 - 25
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/FederationTestUtils.java

@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.federation;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
-import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.FileNotFoundException;
@@ -52,6 +51,9 @@ import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServi
 import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import com.google.common.base.Supplier;
 
 /**
  * Helper utilities for testing HDFS Federation.
@@ -108,33 +110,41 @@ public final class FederationTestUtils {
     return report;
   }
 
-  public static void waitNamenodeRegistered(ActiveNamenodeResolver resolver,
-      String nsId, String nnId, FederationNamenodeServiceState finalState)
-          throws InterruptedException, IllegalStateException, IOException {
-
-    for (int loopCount = 0; loopCount < 20; loopCount++) {
-      if (loopCount > 0) {
-        Thread.sleep(1000);
-      }
-
-      List<? extends FederationNamenodeContext> namenodes =
-          resolver.getNamenodesForNameserviceId(nsId);
-      for (FederationNamenodeContext namenode : namenodes) {
-        // Check if this is the Namenode we are checking
-        if (namenode.getNamenodeId() == nnId  ||
-            namenode.getNamenodeId().equals(nnId)) {
-          if (finalState != null && !namenode.getState().equals(finalState)) {
-            // Wrong state, wait a bit more
-            break;
-          } else {
-            // Found and verified
-            return;
+  /**
+   * Wait for a namenode to be registered with a particular state.
+   * @param resolver Active namenode resolver.
+   * @param nsId Nameservice identifier.
+   * @param nnId Namenode identifier.
+   * @param state State to check for.
+   * @throws Exception If the State Store registration of namenode nsId:nnId
+   *                   in the given state cannot be verified in time.
+   */
+  public static void waitNamenodeRegistered(
+      final ActiveNamenodeResolver resolver,
+      final String nsId, final String nnId,
+      final FederationNamenodeServiceState state) throws Exception {
+
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        try {
+          List<? extends FederationNamenodeContext> namenodes =
+              resolver.getNamenodesForNameserviceId(nsId);
+          if (namenodes != null) {
+            for (FederationNamenodeContext namenode : namenodes) {
+              // Check if this is the Namenode we are checking
+              if (namenode.getNamenodeId() == nnId  ||
+                  namenode.getNamenodeId().equals(nnId)) {
+                return state == null || namenode.getState().equals(state);
+              }
+            }
           }
+        } catch (IOException e) {
+          // Ignore
         }
+        return false;
       }
-    }
-    fail("Failed to verify State Store registration of " + nsId + " " + nnId +
-        " for state " + finalState);
+    }, 1000, 20 * 1000);
   }
 
   public static boolean verifyDate(Date d1, Date d2, long precision) {

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/RouterDFSCluster.java

@@ -747,8 +747,7 @@ public class RouterDFSCluster {
     }
   }
 
-  public void waitNamenodeRegistration()
-      throws InterruptedException, IllegalStateException, IOException {
+  public void waitNamenodeRegistration() throws Exception {
     for (RouterContext r : this.routers) {
       Router router = r.router;
       for (NamenodeContext nn : this.namenodes) {
@@ -761,7 +760,7 @@ public class RouterDFSCluster {
 
   public void waitRouterRegistrationQuorum(RouterContext router,
       FederationNamenodeServiceState state, String nsId, String nnId)
-          throws InterruptedException, IOException {
+          throws Exception {
     LOG.info("Waiting for NN {} {} to transition to {}", nsId, nnId, state);
     ActiveNamenodeResolver nnResolver = router.router.getNamenodeResolver();
     waitNamenodeRegistered(nnResolver, nsId, nnId, state);

+ 25 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestMountTableResolver.java

@@ -69,6 +69,7 @@ public class TestMountTableResolver {
    * ______file1.txt -> 4:/user/file1.txt
    * __usr
    * ____bin -> 2:/bin
+   * __readonly -> 2:/tmp
    *
    * @throws IOException If it cannot set the mount table.
    */
@@ -107,6 +108,12 @@ public class TestMountTableResolver {
     // /user/a/demo/test/b
     map = getMountTableEntry("3", "/user/test");
     mountTable.addEntry(MountTable.newInstance("/user/a/demo/test/b", map));
+
+    // /readonly
+    map = getMountTableEntry("2", "/tmp");
+    MountTable readOnlyEntry = MountTable.newInstance("/readonly", map);
+    readOnlyEntry.setReadOnly(true);
+    mountTable.addEntry(readOnlyEntry);
   }
 
   @Before
@@ -152,6 +159,9 @@ public class TestMountTableResolver {
     assertEquals("3->/user/test/a",
         mountTable.getDestinationForPath("/user/test/a").toString());
 
+    assertEquals("2->/tmp/tesfile1.txt",
+        mountTable.getDestinationForPath("/readonly/tesfile1.txt").toString());
+
   }
 
   private void compareLists(List<String> list1, String[] list2) {
@@ -166,8 +176,8 @@ public class TestMountTableResolver {
 
     // Check getting all mount points (virtual and real) beneath a path
     List<String> mounts = mountTable.getMountPoints("/");
-    assertEquals(3, mounts.size());
-    compareLists(mounts, new String[] {"tmp", "user", "usr"});
+    assertEquals(4, mounts.size());
+    compareLists(mounts, new String[] {"tmp", "user", "usr", "readonly"});
 
     mounts = mountTable.getMountPoints("/user");
     assertEquals(2, mounts.size());
@@ -212,9 +222,10 @@ public class TestMountTableResolver {
 
     // Check listing the mount table records at or beneath a path
     List<MountTable> records = mountTable.getMounts("/");
-    assertEquals(8, records.size());
+    assertEquals(9, records.size());
     compareRecords(records, new String[] {"/", "/tmp", "/user", "/usr/bin",
-        "user/a", "/user/a/demo/a", "/user/a/demo/b", "/user/b/file1.txt"});
+        "user/a", "/user/a/demo/a", "/user/a/demo/b", "/user/b/file1.txt",
+        "readonly"});
 
     records = mountTable.getMounts("/user");
     assertEquals(5, records.size());
@@ -229,6 +240,11 @@ public class TestMountTableResolver {
     records = mountTable.getMounts("/tmp");
     assertEquals(1, records.size());
     compareRecords(records, new String[] {"/tmp"});
+
+    records = mountTable.getMounts("/readonly");
+    assertEquals(1, records.size());
+    compareRecords(records, new String[] {"/readonly"});
+    assertTrue(records.get(0).isReadOnly());
   }
 
   @Test
@@ -237,7 +253,7 @@ public class TestMountTableResolver {
 
      // 4 mount points are present /tmp, /user, /usr, /readonly
     compareLists(mountTable.getMountPoints("/"),
-        new String[] {"user", "usr", "tmp"});
+        new String[] {"user", "usr", "tmp", "readonly"});
 
     // /tmp currently points to namespace 2
     assertEquals("2", mountTable.getDestinationForPath("/tmp/testfile.txt")
@@ -248,7 +264,7 @@ public class TestMountTableResolver {
 
      // Now 3 mount points are present /user, /usr, /readonly
     compareLists(mountTable.getMountPoints("/"),
-        new String[] {"user", "usr"});
+        new String[] {"user", "usr", "readonly"});
 
     // /tmp no longer exists, uses default namespace for mapping /
     assertEquals("1", mountTable.getDestinationForPath("/tmp/testfile.txt")
@@ -261,7 +277,7 @@ public class TestMountTableResolver {
 
      // 4 mount points are present /tmp, /user, /usr, /readonly
     compareLists(mountTable.getMountPoints("/"),
-        new String[] {"user", "usr", "tmp"});
+        new String[] {"user", "usr", "tmp", "readonly"});
 
     // /usr is virtual, uses namespace 1->/
     assertEquals("1", mountTable.getDestinationForPath("/usr/testfile.txt")
@@ -272,7 +288,7 @@ public class TestMountTableResolver {
 
     // Verify the remove failed
     compareLists(mountTable.getMountPoints("/"),
-        new String[] {"user", "usr", "tmp"});
+        new String[] {"user", "usr", "tmp", "readonly"});
   }
 
   @Test
@@ -304,7 +320,7 @@ public class TestMountTableResolver {
 
     // Initial table loaded
     testDestination();
-    assertEquals(8, mountTable.getMounts("/").size());
+    assertEquals(9, mountTable.getMounts("/").size());
 
     // Replace table with /1 and /2
     List<MountTable> records = new ArrayList<>();

+ 31 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java

@@ -143,6 +143,37 @@ public class TestRouterAdmin {
     assertFalse(addResponse2.getStatus());
   }
 
+  @Test
+  public void testAddReadOnlyMountTable() throws IOException {
+    MountTable newEntry = MountTable.newInstance(
+        "/readonly", Collections.singletonMap("ns0", "/testdir"),
+        Time.now(), Time.now());
+    newEntry.setReadOnly(true);
+
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTable = client.getMountTableManager();
+
+    // Existing mount table size
+    List<MountTable> records = getMountTableEntries(mountTable);
+    assertEquals(records.size(), mockMountTable.size());
+
+    // Add
+    AddMountTableEntryRequest addRequest =
+        AddMountTableEntryRequest.newInstance(newEntry);
+    AddMountTableEntryResponse addResponse =
+        mountTable.addMountTableEntry(addRequest);
+    assertTrue(addResponse.getStatus());
+
+    // New mount table size
+    List<MountTable> records2 = getMountTableEntries(mountTable);
+    assertEquals(records2.size(), mockMountTable.size() + 1);
+
+    // Check that we have the read only entry
+    MountTable record = getMountTableEntry("/readonly");
+    assertEquals("/readonly", record.getSourcePath());
+    assertTrue(record.isReadOnly());
+  }
+
   @Test
   public void testRemoveMountTable() throws IOException {
 

+ 143 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterMountTable.java

@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.IOException;
+import java.util.Collections;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.NamenodeContext;
+import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * End-to-end test of the Router, including the MountTable.
+ */
+public class TestRouterMountTable {
+
+  private static StateStoreDFSCluster cluster;
+  private static NamenodeContext nnContext;
+  private static RouterContext routerContext;
+  private static MountTableResolver mountTable;
+
+  @BeforeClass
+  public static void globalSetUp() throws Exception {
+
+    // Build and start a federated cluster
+    cluster = new StateStoreDFSCluster(false, 1);
+    Configuration conf = new RouterConfigBuilder()
+        .stateStore()
+        .admin()
+        .rpc()
+        .build();
+    cluster.addRouterOverrides(conf);
+    cluster.startCluster();
+    cluster.startRouters();
+    cluster.waitClusterUp();
+
+    // Get the end points
+    nnContext = cluster.getRandomNamenode();
+    routerContext = cluster.getRandomRouter();
+    Router router = routerContext.getRouter();
+    mountTable = (MountTableResolver) router.getSubclusterResolver();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    if (cluster != null) {
+      cluster.stopRouter(routerContext);
+      cluster.shutdown();
+      cluster = null;
+    }
+  }
+
+  @Test
+  public void testReadOnly() throws Exception {
+
+    // Add a read only entry
+    MountTable readOnlyEntry = MountTable.newInstance(
+        "/readonly", Collections.singletonMap("ns0", "/testdir"));
+    readOnlyEntry.setReadOnly(true);
+    assertTrue(addMountTable(readOnlyEntry));
+
+    // Add a regular entry
+    MountTable regularEntry = MountTable.newInstance(
+        "/regular", Collections.singletonMap("ns0", "/testdir"));
+    assertTrue(addMountTable(regularEntry));
+
+    // Create a folder that should show up in all locations
+    final FileSystem nnFs = nnContext.getFileSystem();
+    final FileSystem routerFs = routerContext.getFileSystem();
+    assertTrue(routerFs.mkdirs(new Path("/regular/newdir")));
+
+    FileStatus dirStatusNn =
+        nnFs.getFileStatus(new Path("/testdir/newdir"));
+    assertTrue(dirStatusNn.isDirectory());
+    FileStatus dirStatusRegular =
+        routerFs.getFileStatus(new Path("/regular/newdir"));
+    assertTrue(dirStatusRegular.isDirectory());
+    FileStatus dirStatusReadOnly =
+        routerFs.getFileStatus(new Path("/readonly/newdir"));
+    assertTrue(dirStatusReadOnly.isDirectory());
+
+    // It should fail writing into a read only path
+    try {
+      routerFs.mkdirs(new Path("/readonly/newdirfail"));
+      fail("We should not be able to write into a read only mount point");
+    } catch (IOException ioe) {
+      String msg = ioe.getMessage();
+      assertTrue(msg.startsWith(
+          "/readonly/newdirfail is in a read only mount point"));
+    }
+  }
+
+  /**
+   * Add a mount table entry to the mount table through the admin API.
+   * @param entry Mount table entry to add.
+   * @return True if it was successfully added.
+   * @throws IOException Problems adding entries.
+   */
+  private boolean addMountTable(final MountTable entry) throws IOException {
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTableManager = client.getMountTableManager();
+    AddMountTableEntryRequest addRequest =
+        AddMountTableEntryRequest.newInstance(entry);
+    AddMountTableEntryResponse addResponse =
+        mountTableManager.addMountTableEntry(addRequest);
+
+    // Reload the Router cache
+    mountTable.loadCache(true);
+
+    return addResponse.getStatus();
+  }
+}

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java

@@ -72,12 +72,13 @@ public class NameNodeAdapter {
   }
   
   public static HdfsFileStatus getFileInfo(NameNode namenode, String src,
-      boolean resolveLink) throws AccessControlException, UnresolvedLinkException,
-        StandbyException, IOException {
+      boolean resolveLink, boolean needLocation, boolean needBlockToken)
+      throws AccessControlException, UnresolvedLinkException, StandbyException,
+      IOException {
     namenode.getNamesystem().readLock();
     try {
       return FSDirStatAndListingOp.getFileInfo(namenode.getNamesystem()
-          .getFSDirectory(), src, resolveLink);
+          .getFSDirectory(), src, resolveLink, needLocation, needBlockToken);
     } finally {
       namenode.getNamesystem().readUnlock();
     }

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java

@@ -98,8 +98,10 @@ public class OfflineEditsViewerHelper {
     config.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
     config.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
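+    // The edit log operations generated below now include files on an
+    // RS(6,3) directory (see the DFSTestUtil change above), which
+    // presumably needs 6 data + 3 parity datanodes to be writable.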
+    final int numDataNodes = 9;
     cluster =
-      new MiniDFSCluster.Builder(config).manageNameDfsDirs(false).build();
+      new MiniDFSCluster.Builder(config).manageNameDfsDirs(false)
+          .numDataNodes(numDataNodes).build();
     cluster.waitClusterUp();
   }
 

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java

@@ -274,7 +274,8 @@ public class TestBackupNode {
       backup = startBackupNode(conf, StartupOption.BACKUP, 1);
 
       testBNInSync(cluster, backup, 4);
-      assertNotNull(backup.getNamesystem().getFileInfo("/edit-while-bn-down", false));
+      assertNotNull(backup.getNamesystem()
+          .getFileInfo("/edit-while-bn-down", false, false, false));
       
       // Trigger an unclean shutdown of the backup node. Backup node will not
       // unregister from the active when this is done simulating a node crash.
@@ -314,7 +315,8 @@ public class TestBackupNode {
         public Boolean get() {
           LOG.info("Checking for " + src + " on BN");
           try {
-            boolean hasFile = backup.getNamesystem().getFileInfo(src, false) != null;
+            boolean hasFile = backup.getNamesystem()
+                .getFileInfo(src, false, false, false) != null;
             boolean txnIdMatch =
               backup.getRpcServer().getTransactionID() ==
               nn.getRpcServer().getTransactionID();
@@ -465,7 +467,7 @@ public class TestBackupNode {
       assertTrue("file3 does not exist on BackupNode",
           op != StartupOption.BACKUP ||
           backup.getNamesystem().getFileInfo(
-              file3.toUri().getPath(), false) != null);
+              file3.toUri().getPath(), false, false, false) != null);
 
     } catch(IOException e) {
       LOG.error("Error in TestBackupNode:", e);

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java

@@ -288,7 +288,8 @@ public class TestEditLog {
       long numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
       assertEquals(3, numEdits);
       // Sanity check the edit
-      HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
+      HdfsFileStatus fileInfo =
+          namesystem.getFileInfo("/myfile", false, false, false);
       assertEquals("supergroup", fileInfo.getGroup());
       assertEquals(3, fileInfo.getReplication());
     } finally {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -902,7 +902,7 @@ public class TestFsck {
     System.out.println(outStr);
     assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
     assertTrue(outStr.contains("UNDER MIN REPL'D BLOCKS:\t1 (100.0 %)"));
-    assertTrue(outStr.contains("dfs.namenode.replication.min:\t2"));
+    assertTrue(outStr.contains("MINIMAL BLOCK REPLICATION:\t2"));
   }
 
   @Test(timeout = 90000)

+ 11 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java

@@ -43,9 +43,11 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.ipc.ClientId;
@@ -79,6 +81,11 @@ import org.junit.Test;
 public class TestNamenodeRetryCache {
   private static final byte[] CLIENT_ID = ClientId.getClientId();
   private static MiniDFSCluster cluster;
+  private static ErasureCodingPolicy defaultEcPolicy =
+      SystemErasureCodingPolicies.getByID(
+          SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
+  private static int numDataNodes = defaultEcPolicy.getNumDataUnits() +
+      defaultEcPolicy.getNumParityUnits() + 1;
   private static NamenodeProtocols nnRpc;
   private static final FsPermission perm = FsPermission.getDefault();
   private static DistributedFileSystem filesystem;
@@ -93,7 +100,8 @@ public class TestNamenodeRetryCache {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(numDataNodes).build();
     cluster.waitActive();
     nnRpc = cluster.getNameNode().getRpcServer();
     filesystem = cluster.getFileSystem();
@@ -436,7 +444,7 @@ public class TestNamenodeRetryCache {
 
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 39, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -455,7 +463,7 @@ public class TestNamenodeRetryCache {
     assertTrue(namesystem.hasRetryCache());
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
         .getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 39, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReencryption.java

@@ -163,7 +163,8 @@ public class TestReencryption {
   }
 
   private FileEncryptionInfo getFileEncryptionInfo(Path path) throws Exception {
-    return fsn.getFileInfo(path.toString(), false).getFileEncryptionInfo();
+    return fsn.getFileInfo(path.toString(), false, false, false)
+        .getFileEncryptionInfo();
   }
 
   @Test
@@ -1954,4 +1955,4 @@ public class TestReencryption {
     // after NN restart consistent.
     dfsAdmin.getKeyProvider().flush();
   }
-}
+}
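
This hunk, and the getFileInfo call sites changed in the test files below, all follow the same widening: two extra boolean flags, passed as false to keep the old behavior. A hedged sketch of the new call shape; the flag names in the comments are assumptions for illustration, not something this diff confirms:

    // Hedged sketch of the widened lookup call used throughout these tests.
    HdfsFileStatus status = fsn.getFileInfo(
        path.toString(),
        false,   // resolveLink
        false,   // needLocation (assumed name)
        false);  // needBlockToken (assumed name)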

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java

@@ -745,7 +745,7 @@ public class TestSaveNamespace {
 
   private void checkEditExists(FSNamesystem fsn, int id) throws IOException {
     // Make sure the image loaded including our edit.
-    assertNotNull(fsn.getFileInfo("/test" + id, false));
+    assertNotNull(fsn.getFileInfo("/test" + id, false, false, false));
   }
 
   private Configuration getConf() throws IOException {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java

@@ -124,7 +124,7 @@ public class TestEditLogTailer {
       
       for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
         assertTrue(NameNodeAdapter.getFileInfo(nn2,
-            getDirPath(i), false).isDirectory());
+            getDirPath(i), false, false, false).isDirectory());
       }
       
       for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
@@ -137,7 +137,7 @@ public class TestEditLogTailer {
       
       for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
         assertTrue(NameNodeAdapter.getFileInfo(nn2,
-            getDirPath(i), false).isDirectory());
+            getDirPath(i), false, false, false).isDirectory());
       }
     } finally {
       cluster.shutdown();

+ 10 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java

@@ -38,9 +38,10 @@ import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getFileInfo;
+
 import org.junit.Test;
 
 import com.google.common.base.Joiner;
@@ -110,7 +111,8 @@ public class TestEditLogsDuringFailover {
       // the current log segment, and on the next roll, it would have to
       // either replay starting in the middle of the segment (not allowed)
       // or double-replay the edits (incorrect).
-      assertNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), "/test", true));
+      assertNull(getFileInfo(cluster.getNameNode(1), "/test",
+          true, false, false));
       
       cluster.getNameNode(0).getRpcServer().mkdirs("/test2",
           FsPermission.createImmutable((short)0755), true);
@@ -122,8 +124,10 @@ public class TestEditLogsDuringFailover {
 
       // NN1 should have both the edits that came before its restart, and the edits that
       // came after its restart.
-      assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), "/test", true));
-      assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1), "/test2", true));
+      assertNotNull(getFileInfo(cluster.getNameNode(1), "/test",
+          true, false, false));
+      assertNotNull(getFileInfo(cluster.getNameNode(1), "/test2",
+          true, false, false));
     } finally {
       cluster.shutdown();
     }
@@ -165,7 +169,8 @@ public class TestEditLogsDuringFailover {
       // In the transition to active, it should have read the log -- and
       // hence see one of the dirs we made in the fake log.
       String testPath = "/dir" + NUM_DIRS_IN_LOG;
-      assertNotNull(cluster.getNameNode(0).getRpcServer().getFileInfo(testPath));
+      assertNotNull(cluster.getNameNode(0).getRpcServer()
+          .getFileInfo(testPath));
       
       // It also should have finalized that log in the shared directory and started
       // writing to a new one at the next txid.

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java

@@ -213,13 +213,13 @@ public class TestFailureToReadEdits {
     
     // Null because it was deleted.
     assertNull(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR1, false));
+        TEST_DIR1, false, false, false));
     // Should have been successfully created.
     assertTrue(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR2, false).isDirectory());
+        TEST_DIR2, false, false, false).isDirectory());
     // Null because it hasn't been created yet.
     assertNull(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR3, false));
+        TEST_DIR3, false, false, false));
 
     // Now let the standby read ALL the edits.
     answer.setThrowExceptionOnRead(false);
@@ -227,13 +227,13 @@ public class TestFailureToReadEdits {
     
     // Null because it was deleted.
     assertNull(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR1, false));
+        TEST_DIR1, false, false, false));
     // Should have been successfully created.
     assertTrue(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR2, false).isDirectory());
+        TEST_DIR2, false, false, false).isDirectory());
     // Should now have been successfully created.
     assertTrue(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR3, false).isDirectory());
+        TEST_DIR3, false, false, false).isDirectory());
   }
   
   /**

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java

@@ -128,7 +128,7 @@ public class TestInitializeSharedEdits {
       HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
           cluster.getNameNode(1));
       assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-          newPath.toString(), false).isDirectory());
+          newPath.toString(), false, false, false).isDirectory());
     } finally {
       if (fs != null) {
         fs.close();

+ 10 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java

@@ -67,11 +67,13 @@ import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.SystemErasureCodingPolicies;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@@ -93,7 +95,12 @@ public class TestRetryCacheWithHA {
   private static final Log LOG = LogFactory.getLog(TestRetryCacheWithHA.class);
   
   private static final int BlockSize = 1024;
-  private static final short DataNodes = 3;
+  private static ErasureCodingPolicy defaultEcPolicy =
+      SystemErasureCodingPolicies.getByID(
+          SystemErasureCodingPolicies.RS_6_3_POLICY_ID);
+  private static final short DataNodes = (short)(
+      defaultEcPolicy.getNumDataUnits() +
+      defaultEcPolicy.getNumParityUnits() + 1);
   private static final int CHECKTIMES = 10;
   private static final int ResponseSize = 3;
   
@@ -166,7 +173,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn0 = cluster.getNamesystem(0);
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 39, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -187,7 +194,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
         .getRetryCache().getCacheSet();
-    assertEquals("Retry cache size is wrong", 34, cacheSet.size());
+    assertEquals("Retry cache size is wrong", 39, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();

+ 35 - 34
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyInProgressTail.java

@@ -35,8 +35,9 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.MiniQJMHACluster;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.test.GenericTestUtils;
+import static org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter.getFileInfo;
+
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -116,8 +117,8 @@ public class TestStandbyInProgressTail {
       cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
 
       // StandbyNameNode should not finish tailing in-progress logs
-      assertNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-              "/test", true));
+      assertNull(getFileInfo(cluster.getNameNode(1),
+              "/test", true, false, false));
 
       // Restarting the standby should not finalize any edits files
       // in the shared directory when it starts up!
@@ -132,8 +133,8 @@ public class TestStandbyInProgressTail {
       // the current log segment, and on the next roll, it would have to
       // either replay starting in the middle of the segment (not allowed)
       // or double-replay the edits (incorrect).
-      assertNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-              "/test", true));
+      assertNull(getFileInfo(cluster.getNameNode(1),
+              "/test", true, false, false));
 
       cluster.getNameNode(0).getRpcServer().mkdirs("/test2",
               FsPermission.createImmutable((short) 0755), true);
@@ -145,10 +146,10 @@ public class TestStandbyInProgressTail {
 
       // NN1 should have both the edits that came before its restart,
       // and the edits that came after its restart.
-      assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-              "/test", true));
-      assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-              "/test2", true));
+      assertNotNull(getFileInfo(cluster.getNameNode(1),
+              "/test", true, false, false));
+      assertNotNull(getFileInfo(cluster.getNameNode(1),
+              "/test2", true, false, false));
     } finally {
       if (qjmhaCluster != null) {
         qjmhaCluster.shutdown();
@@ -182,8 +183,8 @@ public class TestStandbyInProgressTail {
 
     // After waiting for 5 seconds, StandbyNameNode should finish tailing
     // in-progress logs
-    assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-            "/test", true));
+    assertNotNull(getFileInfo(cluster.getNameNode(1),
+            "/test", true, false, false));
 
     // Restarting the standby should not finalize any edits files
     // in the shared directory when it starts up!
@@ -194,8 +195,8 @@ public class TestStandbyInProgressTail {
     assertNoEditFiles(cluster.getNameDirs(1));
 
     // Because we're using in-progress tailer, this should not be null
-    assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-            "/test", true));
+    assertNotNull(getFileInfo(cluster.getNameNode(1),
+            "/test", true, false, false));
 
     cluster.getNameNode(0).getRpcServer().mkdirs("/test2",
             FsPermission.createImmutable((short) 0755), true);
@@ -207,10 +208,10 @@ public class TestStandbyInProgressTail {
 
     // NN1 should have both the edits that came before its restart,
     // and the edits that came after its restart.
-    assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-            "/test", true));
-    assertNotNull(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-            "/test2", true));
+    assertNotNull(getFileInfo(cluster.getNameNode(1),
+            "/test", true, false, false));
+    assertNotNull(getFileInfo(cluster.getNameNode(1),
+            "/test2", true, false, false));
   }
 
   @Test
@@ -229,7 +230,7 @@ public class TestStandbyInProgressTail {
     nn1.getNamesystem().getEditLogTailer().doTailEdits();
 
     // StandbyNameNode should tail the in-progress edit
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true));
+    assertNotNull(getFileInfo(nn1, "/test", true, false, false));
 
     // Create a new edit and finalized it
     cluster.getNameNode(0).getRpcServer().mkdirs("/test2",
@@ -237,7 +238,7 @@ public class TestStandbyInProgressTail {
     nn0.getRpcServer().rollEditLog();
 
     // StandbyNameNode shouldn't tail the edit since we do not call the method
-    assertNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true));
+    assertNull(getFileInfo(nn1, "/test2", true, false, false));
 
     // Create a new in-progress edit and let SBNN do the tail
     cluster.getNameNode(0).getRpcServer().mkdirs("/test3",
@@ -245,9 +246,9 @@ public class TestStandbyInProgressTail {
     nn1.getNamesystem().getEditLogTailer().doTailEdits();
 
     // StandbyNameNode should tail the finalized edit and the new in-progress
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true));
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true));
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test3", true));
+    assertNotNull(getFileInfo(nn1, "/test", true, false, false));
+    assertNotNull(getFileInfo(nn1, "/test2", true, false, false));
+    assertNotNull(getFileInfo(nn1, "/test3", true, false, false));
   }
 
   @Test
@@ -270,16 +271,16 @@ public class TestStandbyInProgressTail {
     cluster.getNameNode(0).getRpcServer().mkdirs("/test3",
             FsPermission.createImmutable((short) 0755), true);
 
-    assertNull(NameNodeAdapter.getFileInfo(nn1, "/test", true));
-    assertNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true));
-    assertNull(NameNodeAdapter.getFileInfo(nn1, "/test3", true));
+    assertNull(getFileInfo(nn1, "/test", true, false, false));
+    assertNull(getFileInfo(nn1, "/test2", true, false, false));
+    assertNull(getFileInfo(nn1, "/test3", true, false, false));
 
     nn1.getNamesystem().getEditLogTailer().doTailEdits();
 
     // StandbyNameNode should tail the finalized edit and the new in-progress
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true));
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true));
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test3", true));
+    assertNotNull(getFileInfo(nn1, "/test", true, false, false));
+    assertNotNull(getFileInfo(nn1, "/test2", true, false, false));
+    assertNotNull(getFileInfo(nn1, "/test3", true, false, false));
   }
 
   @Test
@@ -296,17 +297,17 @@ public class TestStandbyInProgressTail {
             FsPermission.createImmutable((short) 0755), true);
     nn1.getNamesystem().getEditLogTailer().doTailEdits();
     nn0.getRpcServer().rollEditLog();
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true));
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true));
+    assertNotNull(getFileInfo(nn1, "/test", true, false, false));
+    assertNotNull(getFileInfo(nn1, "/test2", true, false, false));
 
     cluster.getNameNode(0).getRpcServer().mkdirs("/test3",
             FsPermission.createImmutable((short) 0755), true);
     nn1.getNamesystem().getEditLogTailer().doTailEdits();
 
     // StandbyNameNode should tail the finalized edit and the new in-progress
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true));
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test2", true));
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test3", true));
+    assertNotNull(getFileInfo(nn1, "/test", true, false, false));
+    assertNotNull(getFileInfo(nn1, "/test2", true, false, false));
+    assertNotNull(getFileInfo(nn1, "/test3", true, false, false));
   }
 
   @Test
@@ -325,7 +326,7 @@ public class TestStandbyInProgressTail {
     cluster.getNameNode(0).getRpcServer().rollEdits();
 
     cluster.getNameNode(1).getNamesystem().getEditLogTailer().doTailEdits();
-    assertNotNull(NameNodeAdapter.getFileInfo(nn1, "/test", true));
+    assertNotNull(getFileInfo(nn1, "/test", true, false, false));
   }
 
   /**
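
Taken together, the hunks in this file exercise one pattern: write through the active namenode's RPC server, explicitly drive the standby's tailer, then probe the standby's namespace with the statically imported getFileInfo. A condensed sketch of that pattern, built only from calls shown above:

    // Condensed form of the standby in-progress tailing assertions.
    cluster.getNameNode(0).getRpcServer().mkdirs("/test",
        FsPermission.createImmutable((short) 0755), true);
    nn1.getNamesystem().getEditLogTailer().doTailEdits();  // pull in-progress edits
    assertNotNull(getFileInfo(nn1, "/test", true, false, false));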

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/contract/hdfs.xml

@@ -108,7 +108,7 @@
 
   <property>
     <name>fs.contract.supports-content-check</name>
-    <value>false</value>
+    <value>true</value>
   </property>
 
 </configuration>
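
Flipping fs.contract.supports-content-check to true declares that HDFS now validates file content when opening by handle, so the contract suite will run its content-check cases against it. A hedged sketch of how a contract test typically gates on such an option; the key name comes from the property above (the fs.contract. prefix is stripped by the contract loader), while isSupported and skip follow the usual contract-test helpers and should be treated as assumptions, not something this diff shows:

    // Hedged sketch of gating a contract test on this option.
    if (!getContract().isSupported("supports-content-check", false)) {
      skip("filesystem does not validate content on open");
    }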

BIN
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored


+ 378 - 158
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

@@ -13,8 +13,8 @@
       <TXID>2</TXID>
       <DELEGATION_KEY>
         <KEY_ID>1</KEY_ID>
-        <EXPIRY_DATE>1512000829976</EXPIRY_DATE>
-        <KEY>e7457bcc6ab95a84</KEY>
+        <EXPIRY_DATE>1513298395825</EXPIRY_DATE>
+        <KEY>ddb3d2c37b57926a</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -24,8 +24,8 @@
       <TXID>3</TXID>
       <DELEGATION_KEY>
         <KEY_ID>2</KEY_ID>
-        <EXPIRY_DATE>1512000829980</EXPIRY_DATE>
-        <KEY>07cc38caf6c47bb4</KEY>
+        <EXPIRY_DATE>1513298395827</EXPIRY_DATE>
+        <KEY>57acfb80c8b539fa</KEY>
       </DELEGATION_KEY>
     </DATA>
   </RECORD>
@@ -36,11 +36,11 @@
       <LENGTH>0</LENGTH>
       <INODEID>16386</INODEID>
       <PATH>/file_create</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632199</MTIME>
-      <ATIME>1511309632199</ATIME>
+      <REPLICATION>3</REPLICATION>
+      <MTIME>1512607197452</MTIME>
+      <ATIME>1512607197452</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_2134933941_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
@@ -48,8 +48,9 @@
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>5</RPC_CALLID>
+      <ERASURE_CODING_POLICY_ID>0</ERASURE_CODING_POLICY_ID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>35</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -59,9 +60,9 @@
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632248</MTIME>
-      <ATIME>1511309632199</ATIME>
+      <REPLICATION>3</REPLICATION>
+      <MTIME>1512607197500</MTIME>
+      <ATIME>1512607197452</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME/>
       <CLIENT_MACHINE/>
@@ -78,11 +79,11 @@
     <DATA>
       <TXID>6</TXID>
       <PATH>/file_create</PATH>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_2134933941_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <NEWBLOCK>false</NEWBLOCK>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>7</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>37</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -92,9 +93,9 @@
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632263</MTIME>
-      <ATIME>1511309632199</ATIME>
+      <REPLICATION>3</REPLICATION>
+      <MTIME>1512607197516</MTIME>
+      <ATIME>1512607197452</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME/>
       <CLIENT_MACHINE/>
@@ -114,10 +115,10 @@
       <INODEID>16387</INODEID>
       <PATH>/update_blocks</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632266</MTIME>
-      <ATIME>1511309632266</ATIME>
+      <MTIME>1512607197519</MTIME>
+      <ATIME>1512607197519</ATIME>
       <BLOCKSIZE>4096</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_2134933941_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
@@ -125,8 +126,9 @@
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>9</RPC_CALLID>
+      <ERASURE_CODING_POLICY_ID>0</ERASURE_CODING_POLICY_ID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>39</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -188,8 +190,8 @@
       <INODEID>0</INODEID>
       <PATH>/update_blocks</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632454</MTIME>
-      <ATIME>1511309632266</ATIME>
+      <MTIME>1512607197657</MTIME>
+      <ATIME>1512607197519</ATIME>
       <BLOCKSIZE>4096</BLOCKSIZE>
       <CLIENT_NAME/>
       <CLIENT_MACHINE/>
@@ -216,9 +218,9 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1511309632467</TIMESTAMP>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>17</RPC_CALLID>
+      <TIMESTAMP>1512607197671</TIMESTAMP>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>47</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -227,9 +229,9 @@
       <TXID>17</TXID>
       <LENGTH>0</LENGTH>
       <PATH>/file_moved</PATH>
-      <TIMESTAMP>1511309632480</TIMESTAMP>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>19</RPC_CALLID>
+      <TIMESTAMP>1512607197680</TIMESTAMP>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>49</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -239,7 +241,7 @@
       <LENGTH>0</LENGTH>
       <INODEID>16388</INODEID>
       <PATH>/directory_mkdir</PATH>
-      <TIMESTAMP>1511309632488</TIMESTAMP>
+      <TIMESTAMP>1512607197690</TIMESTAMP>
       <PERMISSION_STATUS>
         <USERNAME>lei</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
@@ -274,8 +276,8 @@
       <TXID>22</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>24</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>54</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -285,8 +287,8 @@
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
       <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>25</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>55</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -295,8 +297,8 @@
       <TXID>24</TXID>
       <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
       <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>26</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>56</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -306,11 +308,11 @@
       <LENGTH>0</LENGTH>
       <INODEID>16389</INODEID>
       <PATH>/file_create</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632528</MTIME>
-      <ATIME>1511309632528</ATIME>
+      <REPLICATION>3</REPLICATION>
+      <MTIME>1512607197723</MTIME>
+      <ATIME>1512607197723</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_2134933941_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
@@ -318,8 +320,9 @@
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>27</RPC_CALLID>
+      <ERASURE_CODING_POLICY_ID>0</ERASURE_CODING_POLICY_ID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>57</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -329,9 +332,9 @@
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632530</MTIME>
-      <ATIME>1511309632528</ATIME>
+      <REPLICATION>3</REPLICATION>
+      <MTIME>1512607197726</MTIME>
+      <ATIME>1512607197723</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME/>
       <CLIENT_MACHINE/>
@@ -402,10 +405,10 @@
       <LENGTH>0</LENGTH>
       <SRC>/file_create</SRC>
       <DST>/file_moved</DST>
-      <TIMESTAMP>1511309632561</TIMESTAMP>
+      <TIMESTAMP>1512607197754</TIMESTAMP>
       <OPTIONS>TO_TRASH</OPTIONS>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>35</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>65</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -416,10 +419,10 @@
       <INODEID>16390</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632566</MTIME>
-      <ATIME>1511309632566</ATIME>
+      <MTIME>1512607197759</MTIME>
+      <ATIME>1512607197759</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_2134933941_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
@@ -427,8 +430,9 @@
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>37</RPC_CALLID>
+      <ERASURE_CODING_POLICY_ID>0</ERASURE_CODING_POLICY_ID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>67</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -533,8 +537,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_target</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632593</MTIME>
-      <ATIME>1511309632566</ATIME>
+      <MTIME>1512607197800</MTIME>
+      <ATIME>1512607197759</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME/>
       <CLIENT_MACHINE/>
@@ -569,10 +573,10 @@
       <INODEID>16391</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632596</MTIME>
-      <ATIME>1511309632596</ATIME>
+      <MTIME>1512607197803</MTIME>
+      <ATIME>1512607197803</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_2134933941_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
@@ -580,8 +584,9 @@
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>46</RPC_CALLID>
+      <ERASURE_CODING_POLICY_ID>0</ERASURE_CODING_POLICY_ID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>76</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -686,8 +691,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_0</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632618</MTIME>
-      <ATIME>1511309632596</ATIME>
+      <MTIME>1512607197837</MTIME>
+      <ATIME>1512607197803</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME/>
       <CLIENT_MACHINE/>
@@ -722,10 +727,10 @@
       <INODEID>16392</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632620</MTIME>
-      <ATIME>1511309632620</ATIME>
+      <MTIME>1512607197839</MTIME>
+      <ATIME>1512607197839</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_2134933941_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
@@ -733,8 +738,9 @@
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>55</RPC_CALLID>
+      <ERASURE_CODING_POLICY_ID>0</ERASURE_CODING_POLICY_ID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>85</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -839,8 +845,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_concat_1</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632643</MTIME>
-      <ATIME>1511309632620</ATIME>
+      <MTIME>1512607197878</MTIME>
+      <ATIME>1512607197839</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME/>
       <CLIENT_MACHINE/>
@@ -873,13 +879,13 @@
       <TXID>67</TXID>
       <LENGTH>0</LENGTH>
       <TRG>/file_concat_target</TRG>
-      <TIMESTAMP>1511309632648</TIMESTAMP>
+      <TIMESTAMP>1512607197882</TIMESTAMP>
       <SOURCES>
         <SOURCE1>/file_concat_0</SOURCE1>
         <SOURCE2>/file_concat_1</SOURCE2>
       </SOURCES>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>63</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>93</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -890,10 +896,10 @@
       <INODEID>16393</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632651</MTIME>
-      <ATIME>1511309632651</ATIME>
+      <MTIME>1512607197885</MTIME>
+      <ATIME>1512607197885</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_2134933941_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
@@ -901,8 +907,9 @@
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>65</RPC_CALLID>
+      <ERASURE_CODING_POLICY_ID>0</ERASURE_CODING_POLICY_ID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>95</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -974,8 +981,8 @@
       <INODEID>0</INODEID>
       <PATH>/file_create</PATH>
       <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632667</MTIME>
-      <ATIME>1511309632651</ATIME>
+      <MTIME>1512607197909</MTIME>
+      <ATIME>1512607197885</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME/>
       <CLIENT_MACHINE/>
@@ -1002,10 +1009,10 @@
     <DATA>
       <TXID>76</TXID>
       <SRC>/file_create</SRC>
-      <CLIENTNAME>DFSClient_NONMAPREDUCE_2134933941_1</CLIENTNAME>
+      <CLIENTNAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENTNAME>
       <CLIENTMACHINE>127.0.0.1</CLIENTMACHINE>
       <NEWLENGTH>512</NEWLENGTH>
-      <TIMESTAMP>1511309632671</TIMESTAMP>
+      <TIMESTAMP>1512607197912</TIMESTAMP>
     </DATA>
   </RECORD>
   <RECORD>
@@ -1016,15 +1023,15 @@
       <INODEID>16394</INODEID>
       <PATH>/file_symlink</PATH>
       <VALUE>/file_concat_target</VALUE>
-      <MTIME>1511309632686</MTIME>
-      <ATIME>1511309632686</ATIME>
+      <MTIME>1512607197921</MTIME>
+      <ATIME>1512607197921</ATIME>
       <PERMISSION_STATUS>
         <USERNAME>lei</USERNAME>
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>511</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>72</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>102</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -1034,11 +1041,11 @@
       <LENGTH>0</LENGTH>
       <INODEID>16395</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1511309632689</MTIME>
-      <ATIME>1511309632689</ATIME>
+      <REPLICATION>3</REPLICATION>
+      <MTIME>1512607197925</MTIME>
+      <ATIME>1512607197925</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
-      <CLIENT_NAME>DFSClient_NONMAPREDUCE_2134933941_1</CLIENT_NAME>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
       <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
       <OVERWRITE>true</OVERWRITE>
       <PERMISSION_STATUS>
@@ -1046,8 +1053,9 @@
         <GROUPNAME>supergroup</GROUPNAME>
         <MODE>420</MODE>
       </PERMISSION_STATUS>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>73</RPC_CALLID>
+      <ERASURE_CODING_POLICY_ID>0</ERASURE_CODING_POLICY_ID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>103</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
@@ -1103,21 +1111,30 @@
     <OPCODE>OP_REASSIGN_LEASE</OPCODE>
     <DATA>
       <TXID>84</TXID>
-      <LEASEHOLDER>DFSClient_NONMAPREDUCE_2134933941_1</LEASEHOLDER>
+      <LEASEHOLDER>DFSClient_NONMAPREDUCE_-923924783_1</LEASEHOLDER>
       <PATH>/hard-lease-recovery-test</PATH>
-      <NEWHOLDER>HDFS_NameNode-2017-11-21 16:13:54,700-0800</NEWHOLDER>
+      <NEWHOLDER>HDFS_NameNode-2017-12-06 16:39:59,951-0800</NEWHOLDER>
     </DATA>
   </RECORD>
   <RECORD>
-    <OPCODE>OP_CLOSE</OPCODE>
+    <OPCODE>OP_REASSIGN_LEASE</OPCODE>
     <DATA>
       <TXID>85</TXID>
+      <LEASEHOLDER>HDFS_NameNode-2017-12-06 16:39:59,951-0800</LEASEHOLDER>
+      <PATH>/hard-lease-recovery-test</PATH>
+      <NEWHOLDER>HDFS_NameNode-2017-12-06 16:40:01,959-0800</NEWHOLDER>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_CLOSE</OPCODE>
+    <DATA>
+      <TXID>86</TXID>
       <LENGTH>0</LENGTH>
       <INODEID>0</INODEID>
       <PATH>/hard-lease-recovery-test</PATH>
-      <REPLICATION>1</REPLICATION>
-      <MTIME>1511309634880</MTIME>
-      <ATIME>1511309632689</ATIME>
+      <REPLICATION>3</REPLICATION>
+      <MTIME>1512607202974</MTIME>
+      <ATIME>1512607197925</ATIME>
       <BLOCKSIZE>512</BLOCKSIZE>
       <CLIENT_NAME/>
       <CLIENT_MACHINE/>
@@ -1137,7 +1154,7 @@
   <RECORD>
     <OPCODE>OP_ADD_CACHE_POOL</OPCODE>
     <DATA>
-      <TXID>86</TXID>
+      <TXID>87</TXID>
       <POOLNAME>pool1</POOLNAME>
       <OWNERNAME>lei</OWNERNAME>
       <GROUPNAME>staff</GROUPNAME>
@@ -1145,65 +1162,65 @@
       <LIMIT>9223372036854775807</LIMIT>
       <MAXRELATIVEEXPIRY>2305843009213693951</MAXRELATIVEEXPIRY>
       <DEFAULTREPLICATION>1</DEFAULTREPLICATION>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>80</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>138</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_MODIFY_CACHE_POOL</OPCODE>
     <DATA>
-      <TXID>87</TXID>
+      <TXID>88</TXID>
       <POOLNAME>pool1</POOLNAME>
       <LIMIT>99</LIMIT>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>81</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>139</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_CACHE_DIRECTIVE</OPCODE>
     <DATA>
-      <TXID>88</TXID>
+      <TXID>89</TXID>
       <ID>1</ID>
       <PATH>/path</PATH>
       <REPLICATION>1</REPLICATION>
       <POOL>pool1</POOL>
-      <EXPIRATION>2305844520523329692</EXPIRATION>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>82</RPC_CALLID>
+      <EXPIRATION>2305844521820897941</EXPIRATION>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>140</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_MODIFY_CACHE_DIRECTIVE</OPCODE>
     <DATA>
-      <TXID>89</TXID>
+      <TXID>90</TXID>
       <ID>1</ID>
       <REPLICATION>2</REPLICATION>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>83</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>141</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REMOVE_CACHE_DIRECTIVE</OPCODE>
     <DATA>
-      <TXID>90</TXID>
+      <TXID>91</TXID>
       <ID>1</ID>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>84</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>142</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REMOVE_CACHE_POOL</OPCODE>
     <DATA>
-      <TXID>91</TXID>
+      <TXID>92</TXID>
       <POOLNAME>pool1</POOLNAME>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>85</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>143</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_ACL</OPCODE>
     <DATA>
-      <TXID>92</TXID>
+      <TXID>93</TXID>
       <SRC>/file_concat_target</SRC>
       <ENTRY>
         <SCOPE>ACCESS</SCOPE>
@@ -1236,61 +1253,61 @@
   <RECORD>
     <OPCODE>OP_SET_XATTR</OPCODE>
     <DATA>
-      <TXID>93</TXID>
+      <TXID>94</TXID>
       <SRC>/file_concat_target</SRC>
       <XATTR>
         <NAMESPACE>USER</NAMESPACE>
         <NAME>a1</NAME>
         <VALUE>0x313233</VALUE>
       </XATTR>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>87</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>145</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_SET_XATTR</OPCODE>
     <DATA>
-      <TXID>94</TXID>
+      <TXID>95</TXID>
       <SRC>/file_concat_target</SRC>
       <XATTR>
         <NAMESPACE>USER</NAMESPACE>
         <NAME>a2</NAME>
         <VALUE>0x373839</VALUE>
       </XATTR>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>88</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>146</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REMOVE_XATTR</OPCODE>
     <DATA>
-      <TXID>95</TXID>
+      <TXID>96</TXID>
       <SRC>/file_concat_target</SRC>
       <XATTR>
         <NAMESPACE>USER</NAMESPACE>
         <NAME>a2</NAME>
       </XATTR>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>89</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>147</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_ERASURE_CODING_POLICY</OPCODE>
     <DATA>
-      <TXID>96</TXID>
+      <TXID>97</TXID>
       <CODEC>rs</CODEC>
       <DATAUNITS>3</DATAUNITS>
       <PARITYUNITS>2</PARITYUNITS>
       <CELLSIZE>8192</CELLSIZE>
       <EXTRAOPTIONS>0</EXTRAOPTIONS>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>90</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>148</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ADD_ERASURE_CODING_POLICY</OPCODE>
     <DATA>
-      <TXID>97</TXID>
+      <TXID>98</TXID>
       <CODEC>rs</CODEC>
       <DATAUNITS>6</DATAUNITS>
       <PARITYUNITS>10</PARITYUNITS>
@@ -1300,82 +1317,285 @@
         <KEY>dummyKey</KEY>
         <VALUE>dummyValue</VALUE>
       </EXTRAOPTION>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>91</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>149</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ENABLE_ERASURE_CODING_POLICY</OPCODE>
     <DATA>
-      <TXID>98</TXID>
+      <TXID>99</TXID>
       <POLICYNAME>RS-3-2-8k</POLICYNAME>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>92</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>150</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ENABLE_ERASURE_CODING_POLICY</OPCODE>
     <DATA>
-      <TXID>99</TXID>
+      <TXID>100</TXID>
       <POLICYNAME>RS-6-10-4k</POLICYNAME>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>93</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>151</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_DISABLE_ERASURE_CODING_POLICY</OPCODE>
     <DATA>
-      <TXID>100</TXID>
+      <TXID>101</TXID>
       <POLICYNAME>RS-3-2-8k</POLICYNAME>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>94</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>152</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_DISABLE_ERASURE_CODING_POLICY</OPCODE>
     <DATA>
-      <TXID>101</TXID>
+      <TXID>102</TXID>
       <POLICYNAME>RS-6-10-4k</POLICYNAME>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>95</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>153</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REMOVE_ERASURE_CODING_POLICY</OPCODE>
     <DATA>
-      <TXID>102</TXID>
+      <TXID>103</TXID>
       <POLICYNAME>RS-3-2-8k</POLICYNAME>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>96</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>154</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_REMOVE_ERASURE_CODING_POLICY</OPCODE>
     <DATA>
-      <TXID>103</TXID>
+      <TXID>104</TXID>
       <POLICYNAME>RS-6-10-4k</POLICYNAME>
-      <RPC_CLIENTID>a4dc081c-6d6f-42d6-af5b-d260228f1aad</RPC_CLIENTID>
-      <RPC_CALLID>97</RPC_CALLID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>155</RPC_CALLID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_MKDIR</OPCODE>
+    <DATA>
+      <TXID>105</TXID>
+      <LENGTH>0</LENGTH>
+      <INODEID>16396</INODEID>
+      <PATH>/ec</PATH>
+      <TIMESTAMP>1512607204077</TIMESTAMP>
+      <PERMISSION_STATUS>
+        <USERNAME>lei</USERNAME>
+        <GROUPNAME>supergroup</GROUPNAME>
+        <MODE>493</MODE>
+      </PERMISSION_STATUS>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_ENABLE_ERASURE_CODING_POLICY</OPCODE>
+    <DATA>
+      <TXID>106</TXID>
+      <POLICYNAME>RS-3-2-1024k</POLICYNAME>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>157</RPC_CALLID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_ENABLE_ERASURE_CODING_POLICY</OPCODE>
+    <DATA>
+      <TXID>107</TXID>
+      <POLICYNAME>RS-6-3-1024k</POLICYNAME>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>158</RPC_CALLID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_SET_XATTR</OPCODE>
+    <DATA>
+      <TXID>108</TXID>
+      <SRC>/ec</SRC>
+      <XATTR>
+        <NAMESPACE>SYSTEM</NAMESPACE>
+        <NAME>hdfs.erasurecoding.policy</NAME>
+        <VALUE>0x0000000c52532d362d332d313032346b</VALUE>
+      </XATTR>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>159</RPC_CALLID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_ADD</OPCODE>
+    <DATA>
+      <TXID>109</TXID>
+      <LENGTH>0</LENGTH>
+      <INODEID>16397</INODEID>
+      <PATH>/ec/replicated</PATH>
+      <REPLICATION>3</REPLICATION>
+      <MTIME>1512607204088</MTIME>
+      <ATIME>1512607204088</ATIME>
+      <BLOCKSIZE>512</BLOCKSIZE>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
+      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
+      <OVERWRITE>true</OVERWRITE>
+      <PERMISSION_STATUS>
+        <USERNAME>lei</USERNAME>
+        <GROUPNAME>supergroup</GROUPNAME>
+        <MODE>420</MODE>
+      </PERMISSION_STATUS>
+      <ERASURE_CODING_POLICY_ID>0</ERASURE_CODING_POLICY_ID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>160</RPC_CALLID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
+    <DATA>
+      <TXID>110</TXID>
+      <BLOCK_ID>1073741838</BLOCK_ID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
+    <DATA>
+      <TXID>111</TXID>
+      <GENSTAMPV2>1015</GENSTAMPV2>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_ADD_BLOCK</OPCODE>
+    <DATA>
+      <TXID>112</TXID>
+      <PATH>/ec/replicated</PATH>
+      <BLOCK>
+        <BLOCK_ID>1073741838</BLOCK_ID>
+        <NUM_BYTES>0</NUM_BYTES>
+        <GENSTAMP>1015</GENSTAMP>
+      </BLOCK>
+      <RPC_CLIENTID/>
+      <RPC_CALLID>-2</RPC_CALLID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_CLOSE</OPCODE>
+    <DATA>
+      <TXID>113</TXID>
+      <LENGTH>0</LENGTH>
+      <INODEID>0</INODEID>
+      <PATH>/ec/replicated</PATH>
+      <REPLICATION>3</REPLICATION>
+      <MTIME>1512607204118</MTIME>
+      <ATIME>1512607204088</ATIME>
+      <BLOCKSIZE>512</BLOCKSIZE>
+      <CLIENT_NAME/>
+      <CLIENT_MACHINE/>
+      <OVERWRITE>false</OVERWRITE>
+      <BLOCK>
+        <BLOCK_ID>1073741838</BLOCK_ID>
+        <NUM_BYTES>10</NUM_BYTES>
+        <GENSTAMP>1015</GENSTAMP>
+      </BLOCK>
+      <PERMISSION_STATUS>
+        <USERNAME>lei</USERNAME>
+        <GROUPNAME>supergroup</GROUPNAME>
+        <MODE>420</MODE>
+      </PERMISSION_STATUS>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_ADD</OPCODE>
+    <DATA>
+      <TXID>114</TXID>
+      <LENGTH>0</LENGTH>
+      <INODEID>16398</INODEID>
+      <PATH>/ec/RS-3-2</PATH>
+      <REPLICATION>1</REPLICATION>
+      <MTIME>1512607204120</MTIME>
+      <ATIME>1512607204120</ATIME>
+      <BLOCKSIZE>512</BLOCKSIZE>
+      <CLIENT_NAME>DFSClient_NONMAPREDUCE_-923924783_1</CLIENT_NAME>
+      <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
+      <OVERWRITE>true</OVERWRITE>
+      <PERMISSION_STATUS>
+        <USERNAME>lei</USERNAME>
+        <GROUPNAME>supergroup</GROUPNAME>
+        <MODE>420</MODE>
+      </PERMISSION_STATUS>
+      <ERASURE_CODING_POLICY_ID>2</ERASURE_CODING_POLICY_ID>
+      <RPC_CLIENTID>cab1aa2d-e08a-4d2f-8216-76e167eccd94</RPC_CLIENTID>
+      <RPC_CALLID>166</RPC_CALLID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_ALLOCATE_BLOCK_ID</OPCODE>
+    <DATA>
+      <TXID>115</TXID>
+      <BLOCK_ID>-9223372036854775792</BLOCK_ID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_SET_GENSTAMP_V2</OPCODE>
+    <DATA>
+      <TXID>116</TXID>
+      <GENSTAMPV2>1016</GENSTAMPV2>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_ADD_BLOCK</OPCODE>
+    <DATA>
+      <TXID>117</TXID>
+      <PATH>/ec/RS-3-2</PATH>
+      <BLOCK>
+        <BLOCK_ID>-9223372036854775792</BLOCK_ID>
+        <NUM_BYTES>0</NUM_BYTES>
+        <GENSTAMP>1016</GENSTAMP>
+      </BLOCK>
+      <RPC_CLIENTID/>
+      <RPC_CALLID>-2</RPC_CALLID>
+    </DATA>
+  </RECORD>
+  <RECORD>
+    <OPCODE>OP_CLOSE</OPCODE>
+    <DATA>
+      <TXID>118</TXID>
+      <LENGTH>0</LENGTH>
+      <INODEID>0</INODEID>
+      <PATH>/ec/RS-3-2</PATH>
+      <REPLICATION>1</REPLICATION>
+      <MTIME>1512607204229</MTIME>
+      <ATIME>1512607204120</ATIME>
+      <BLOCKSIZE>512</BLOCKSIZE>
+      <CLIENT_NAME/>
+      <CLIENT_MACHINE/>
+      <OVERWRITE>false</OVERWRITE>
+      <BLOCK>
+        <BLOCK_ID>-9223372036854775792</BLOCK_ID>
+        <NUM_BYTES>6</NUM_BYTES>
+        <GENSTAMP>1016</GENSTAMP>
+      </BLOCK>
+      <PERMISSION_STATUS>
+        <USERNAME>lei</USERNAME>
+        <GROUPNAME>supergroup</GROUPNAME>
+        <MODE>420</MODE>
+      </PERMISSION_STATUS>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ROLLING_UPGRADE_START</OPCODE>
     <DATA>
-      <TXID>104</TXID>
-      <STARTTIME>1511309635904</STARTTIME>
+      <TXID>119</TXID>
+      <STARTTIME>1512607204230</STARTTIME>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_ROLLING_UPGRADE_FINALIZE</OPCODE>
     <DATA>
-      <TXID>105</TXID>
-      <FINALIZETIME>1511309635904</FINALIZETIME>
+      <TXID>120</TXID>
+      <FINALIZETIME>1512607204233</FINALIZETIME>
     </DATA>
   </RECORD>
   <RECORD>
     <OPCODE>OP_END_LOG_SEGMENT</OPCODE>
     <DATA>
-      <TXID>106</TXID>
+      <TXID>121</TXID>
     </DATA>
   </RECORD>
 </EDITS>
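
The binary editsStored fixture and this XML dump are regenerated as a pair. Beyond the refreshed timestamps, keys, and RPC ids, the substantive changes are the new ERASURE_CODING_POLICY_ID field on OP_ADD records, the appended erasure-coding and /ec records at the tail, and a second OP_REASSIGN_LEASE during hard lease recovery, which together shift every subsequent TXID (85 becomes 86, 104 becomes 119, and so on). To inspect or regenerate the XML form yourself, standard offline edits viewer usage applies (shown as a reminder, not as part of this change):

    hdfs oev -p xml -i editsStored -o editsStored.xml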

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientCache.java

@@ -23,8 +23,6 @@ import java.security.PrivilegedAction;
 import java.util.HashMap;
 import java.util.Map;
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapreduce.JobID;
@@ -35,13 +33,15 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class ClientCache {
 
   private final Configuration conf;
   private final ResourceMgrDelegate rm;
 
-  private static final Log LOG = LogFactory.getLog(ClientCache.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ClientCache.class);
 
   private Map<JobID, ClientServiceDelegate> cache = 
       new HashMap<JobID, ClientServiceDelegate>();
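
This and the remaining jobclient diffs are one mechanical migration: commons-logging Log/LogFactory out, slf4j Logger/LoggerFactory in. A minimal sketch of the resulting pattern; the parameterized-message call is a standard slf4j idiom and an illustration, not something these hunks show:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ClientCache {
      private static final Logger LOG =
          LoggerFactory.getLogger(ClientCache.class);

      void lookup(String jobId) {
        // slf4j defers formatting until the level is enabled, so disabled
        // debug logging pays no string-concatenation cost.
        LOG.debug("Fetching client service delegate for {}", jobId);
      }
    }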

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java

@@ -29,8 +29,6 @@ import java.util.List;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.ipc.RPC;
@@ -79,11 +77,14 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier;
 import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
 public class ClientServiceDelegate {
-  private static final Log LOG = LogFactory.getLog(ClientServiceDelegate.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ClientServiceDelegate.class);
   private static final String UNAVAILABLE = "N/A";
 
   // Caches for per-user NotRunningJobs

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java

@@ -25,8 +25,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -78,11 +76,14 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
 public class ResourceMgrDelegate extends YarnClient {
-  private static final Log LOG = LogFactory.getLog(ResourceMgrDelegate.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ResourceMgrDelegate.class);
       
   private YarnConfiguration conf;
   private ApplicationSubmissionContext application;

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java

@@ -36,8 +36,6 @@ import java.util.Vector;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileContext;
@@ -99,6 +97,8 @@ import org.apache.hadoop.yarn.security.client.RMDelegationTokenSelector;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.UnitsConversionUtil;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -108,7 +108,7 @@ import com.google.common.annotations.VisibleForTesting;
 @SuppressWarnings("unchecked")
 public class YARNRunner implements ClientProtocol {
 
-  private static final Log LOG = LogFactory.getLog(YARNRunner.class);
+  private static final Logger LOG = LoggerFactory.getLogger(YARNRunner.class);
 
   private static final String RACK_GROUP = "rack";
   private static final String NODE_IF_RACK_GROUP = "node1";

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fi/ProbabilityModel.java

@@ -19,9 +19,9 @@ package org.apache.hadoop.fi;
 
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is responsible for the decision of when a fault 
@@ -42,7 +42,8 @@ import org.apache.hadoop.conf.Configuration;
  */
 public class ProbabilityModel {
   private static Random generator = new Random();
-  private static final Log LOG = LogFactory.getLog(ProbabilityModel.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ProbabilityModel.class);
 
   static final String FPROB_NAME = "fi.";
   private static final String ALL_PROBABILITIES = FPROB_NAME + "*";

+ 5 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/AccumulatingReducer.java

@@ -20,10 +20,10 @@ package org.apache.hadoop.fs;
 import java.io.IOException;
 import java.util.Iterator;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Reducer that accumulates values based on their type.
@@ -47,7 +47,9 @@ public class AccumulatingReducer extends MapReduceBase
   static final String VALUE_TYPE_LONG = "l:";
   static final String VALUE_TYPE_FLOAT = "f:";
   static final String VALUE_TYPE_STRING = "s:";
-  private static final Log LOG = LogFactory.getLog(AccumulatingReducer.class);
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(AccumulatingReducer.class);
   
   protected String hostName;
   

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/DFSCIOTest.java

@@ -28,8 +28,6 @@ import java.io.PrintStream;
 import java.util.Date;
 import java.util.StringTokenizer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.SequenceFile;
@@ -38,6 +36,8 @@ import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.mapred.*;
 import org.junit.Ignore;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
  /**
  * Distributed i/o benchmark.
@@ -69,7 +69,7 @@ import org.junit.Test;
 @Ignore
 public class DFSCIOTest {
   // Constants
-  private static final Log LOG = LogFactory.getLog(DFSCIOTest.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DFSCIOTest.class);
   private static final int TEST_TYPE_READ = 0;
   private static final int TEST_TYPE_WRITE = 1;
   private static final int TEST_TYPE_CLEANUP = 2;

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/DistributedFSCheck.java

@@ -33,8 +33,6 @@ import java.util.Vector;
 
 import junit.framework.TestCase;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.SequenceFile;
@@ -42,6 +40,8 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.mapred.*;
 import org.junit.Ignore;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Distributed checkup of the file system consistency.
@@ -56,7 +56,8 @@ import org.junit.Ignore;
 @Ignore
 public class DistributedFSCheck extends TestCase {
   // Constants
-  private static final Log LOG = LogFactory.getLog(DistributedFSCheck.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DistributedFSCheck.class);
   private static final int TEST_TYPE_READ = 0;
   private static final int TEST_TYPE_CLEANUP = 2;
   private static final int DEFAULT_BUFFER_SIZE = 1000000;

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/JHLogAnalyzer.java

@@ -34,8 +34,6 @@ import java.util.Map;
 import java.util.StringTokenizer;
 import java.util.HashMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.SequenceFile;
@@ -46,6 +44,8 @@ import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.mapred.*;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Job History Log Analyzer.
@@ -144,7 +144,8 @@ import org.apache.hadoop.util.StringUtils;
  */
 @SuppressWarnings("deprecation")
 public class JHLogAnalyzer {
-  private static final Log LOG = LogFactory.getLog(JHLogAnalyzer.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JHLogAnalyzer.class);
   // Constants
   private static final String JHLA_ROOT_DIR = 
                             System.getProperty("test.build.data", "stats/JHLA");

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java

@@ -33,8 +33,6 @@ import java.util.Collection;
 import java.util.Date;
 import java.util.Random;
 import java.util.StringTokenizer;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -62,6 +60,8 @@ import org.apache.hadoop.util.ToolRunner;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Distributed i/o benchmark.
@@ -92,7 +92,7 @@ import org.junit.Test;
  */
 public class TestDFSIO implements Tool {
   // Constants
-  private static final Log LOG = LogFactory.getLog(TestDFSIO.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestDFSIO.class);
   private static final int DEFAULT_BUFFER_SIZE = 1000000;
   private static final String BASE_FILE_NAME = "test_io_";
   private static final String DEFAULT_RES_FILE_NAME = "TestDFSIO_results.log";

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestJHLA.java

@@ -23,11 +23,11 @@ import java.io.FileOutputStream;
 import java.io.OutputStreamWriter;
 import java.io.File;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test Job History Log Analyzer.
@@ -35,7 +35,8 @@ import org.junit.Test;
  * @see JHLogAnalyzer
  */
 public class TestJHLA {
-  private static final Log LOG = LogFactory.getLog(JHLogAnalyzer.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JHLogAnalyzer.class);
   private String historyLog = System.getProperty("test.build.data", 
                                   "build/test/data") + "/history/test.log";
 

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGeneratorMR.java

@@ -26,8 +26,6 @@ import java.net.UnknownHostException;
 import java.util.EnumSet;
 import java.util.Iterator;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.CreateFlag;
@@ -50,6 +48,8 @@ import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.TextOutputFormat;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** The load generator is a tool for testing NameNode behavior under
  * different client loads.
@@ -63,7 +63,7 @@ import org.apache.hadoop.util.ToolRunner;
  *
  */
 public class LoadGeneratorMR extends LoadGenerator {
-  public static final Log LOG = LogFactory.getLog(LoadGenerator.class);
+  public static final Logger LOG = LoggerFactory.getLogger(LoadGenerator.class);
   private static int numMapTasks = 1;
   private String mrOutDir;
   

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/AppendOp.java

@@ -24,12 +24,12 @@ import java.io.OutputStream;
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.slive.DataWriter.GenerateOutput;
 import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Operation which selects a random file and appends a random amount of bytes
@@ -41,7 +41,7 @@ import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
  */
 class AppendOp extends Operation {
 
-  private static final Log LOG = LogFactory.getLog(AppendOp.class);
+  private static final Logger LOG = LoggerFactory.getLogger(AppendOp.class);
 
   AppendOp(ConfigExtractor cfg, Random rnd) {
     super(AppendOp.class.getSimpleName(), cfg, rnd);

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ConfigExtractor.java

@@ -22,12 +22,12 @@ import java.text.NumberFormat;
 import java.util.HashMap;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.slive.Constants.OperationType;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
 * Simple access layer on top of a configuration object that extracts the slive
@@ -35,7 +35,8 @@ import org.apache.hadoop.util.StringUtils;
  */
 class ConfigExtractor {
 
-  private static final Log LOG = LogFactory.getLog(ConfigExtractor.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ConfigExtractor.class);
 
   private Configuration config;
 

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/CreateOp.java

@@ -22,13 +22,13 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.slive.DataWriter.GenerateOutput;
 import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Operation which selects a random file and a random number of bytes to create
@@ -42,7 +42,7 @@ import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
  */
 class CreateOp extends Operation {
 
-  private static final Log LOG = LogFactory.getLog(CreateOp.class);
+  private static final Logger LOG = LoggerFactory.getLogger(CreateOp.class);
 
   private static int DEF_IO_BUFFER_SIZE = 4096;
 

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/DeleteOp.java

@@ -23,11 +23,11 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Operation which selects a random file and attempts to delete that file (if it
@@ -39,7 +39,7 @@ import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
  */
 class DeleteOp extends Operation {
 
-  private static final Log LOG = LogFactory.getLog(DeleteOp.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DeleteOp.class);
 
   DeleteOp(ConfigExtractor cfg, Random rnd) {
     super(DeleteOp.class.getSimpleName(), cfg, rnd);

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ListOp.java

@@ -23,12 +23,12 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Operation which selects a random directory and attempts to list that
@@ -41,7 +41,7 @@ import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
  */
 class ListOp extends Operation {
 
-  private static final Log LOG = LogFactory.getLog(ListOp.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ListOp.class);
 
   ListOp(ConfigExtractor cfg, Random rnd) {
     super(ListOp.class.getSimpleName(), cfg, rnd);

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/MkdirOp.java

@@ -23,11 +23,11 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Operation which selects a random directory and attempts to create that
@@ -40,7 +40,7 @@ import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
  */
 class MkdirOp extends Operation {
 
-  private static final Log LOG = LogFactory.getLog(MkdirOp.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MkdirOp.class);
 
   MkdirOp(ConfigExtractor cfg, Random rnd) {
     super(MkdirOp.class.getSimpleName(), cfg, rnd);

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ReadOp.java

@@ -24,12 +24,12 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.slive.DataVerifier.VerifyOutput;
 import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Operation which selects a random file and selects a random read size (from
@@ -43,7 +43,7 @@ import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
  * number of failures and the amount of time taken to fail
  */
 class ReadOp extends Operation {
-  private static final Log LOG = LogFactory.getLog(ReadOp.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ReadOp.class);
 
   ReadOp(ConfigExtractor cfg, Random rnd) {
     super(ReadOp.class.getSimpleName(), cfg, rnd);

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/RenameOp.java

@@ -23,11 +23,11 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Operation which selects a random file and a second random file and attempts
@@ -60,7 +60,7 @@ class RenameOp extends Operation {
     }
   }
 
-  private static final Log LOG = LogFactory.getLog(RenameOp.class);
+  private static final Logger LOG = LoggerFactory.getLogger(RenameOp.class);
 
   RenameOp(ConfigExtractor cfg, Random rnd) {
     super(RenameOp.class.getSimpleName(), cfg, rnd);

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/ReportWriter.java

@@ -24,8 +24,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Class which provides a report for the given operation output
@@ -48,7 +48,7 @@ class ReportWriter {
   static final String NOT_FOUND = "files_not_found";
   static final String BAD_FILES = "bad_files";
 
-  private static final Log LOG = LogFactory.getLog(ReportWriter.class);
+  private static final Logger LOG = LoggerFactory.getLogger(ReportWriter.class);
 
   private static final String SECTION_DELIM = "-------------";
 

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SleepOp.java

@@ -21,10 +21,10 @@ package org.apache.hadoop.fs.slive;
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Operation which sleeps for a given number of milliseconds according to the
@@ -32,7 +32,7 @@ import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
  */
 class SleepOp extends Operation {
 
-  private static final Log LOG = LogFactory.getLog(SleepOp.class);
+  private static final Logger LOG = LoggerFactory.getLogger(SleepOp.class);
 
   SleepOp(ConfigExtractor cfg, Random rnd) {
     super(SleepOp.class.getSimpleName(), cfg, rnd);

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SliveMapper.java

@@ -22,8 +22,6 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
 import org.apache.hadoop.io.Text;
@@ -35,6 +33,8 @@ import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The slive class which sets up the mapper to be used which itself will receive
@@ -45,7 +45,7 @@ import org.apache.hadoop.util.StringUtils;
 public class SliveMapper extends MapReduceBase implements
     Mapper<Object, Object, Text, Text> {
 
-  private static final Log LOG = LogFactory.getLog(SliveMapper.class);
+  private static final Logger LOG = LoggerFactory.getLogger(SliveMapper.class);
 
   private static final String OP_TYPE = SliveMapper.class.getSimpleName();
 

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SliveReducer.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.fs.slive;
 import java.io.IOException;
 import java.util.Iterator;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MapReduceBase;
@@ -30,6 +28,8 @@ import org.apache.hadoop.mapred.OutputCollector;
 import org.apache.hadoop.mapred.Reducer;
 import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * The slive reducer which iterates over the given input values and merges them
@@ -38,7 +38,7 @@ import org.apache.hadoop.util.StringUtils;
 public class SliveReducer extends MapReduceBase implements
     Reducer<Text, Text, Text, Text> {
 
-  private static final Log LOG = LogFactory.getLog(SliveReducer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(SliveReducer.class);
 
   private ConfigExtractor config;
 

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/SliveTest.java

@@ -30,8 +30,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.TreeMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -45,6 +43,8 @@ import org.apache.hadoop.mapred.TextOutputFormat;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Slive test entry point + main program
@@ -61,7 +61,7 @@ import org.apache.hadoop.util.ToolRunner;
  */
 public class SliveTest implements Tool {
 
-  private static final Log LOG = LogFactory.getLog(SliveTest.class);
+  private static final Logger LOG = LoggerFactory.getLogger(SliveTest.class);
 
   // ensures the hdfs configurations are loaded if they exist
   static {

+ 5 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/TestSlive.java

@@ -31,8 +31,6 @@ import java.util.List;
 import java.util.Random;
 import java.util.Set;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -43,13 +41,15 @@ import org.apache.hadoop.fs.slive.DataWriter.GenerateOutput;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.Before;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Junit 4 test for slive
  */
 public class TestSlive {
 
-  private static final Log LOG = LogFactory.getLog(TestSlive.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TestSlive.class);
 
   private static final Random rnd = new Random(1L);
 
@@ -258,13 +258,13 @@ public class TestSlive {
     DataWriter writer = new DataWriter(rnd);
     FileOutputStream fs = new FileOutputStream(fn);
     GenerateOutput ostat = writer.writeSegment(byteAm, fs);
-    LOG.info(ostat);
+    LOG.info(ostat.toString());
     fs.close();
     assertTrue(ostat.getBytesWritten() == byteAm);
     DataVerifier vf = new DataVerifier();
     FileInputStream fin = new FileInputStream(fn);
     VerifyOutput vfout = vf.verifyFile(byteAm, new DataInputStream(fin));
-    LOG.info(vfout);
+    LOG.info(vfout.toString());
     fin.close();
     assertEquals(vfout.getBytesRead(), byteAm);
     assertTrue(vfout.getChunksDifferent() == 0);
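
The two LOG.info changes above are forced by the API: commons-logging accepts any Object via info(Object), while SLF4J's logging methods take a String message, so passing ostat or vfout directly no longer compiles. A small sketch of the two equivalent options, using java.util.Date as a hypothetical stand-in for a value with a useful toString():

    import java.util.Date;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ToStringLogging {
      private static final Logger LOG =
          LoggerFactory.getLogger(ToStringLogging.class);

      public static void main(String[] args) {
        Date status = new Date(); // stand-in for GenerateOutput / VerifyOutput
        LOG.info(status.toString()); // explicit conversion, as in the diff
        LOG.info("{}", status);      // parameterized form, defers toString()
      }
    }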

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/TruncateOp.java

@@ -22,12 +22,12 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.List;
 import java.util.Random;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Operation which selects a random file and truncates a random amount of bytes
@@ -40,7 +40,7 @@ import org.apache.hadoop.fs.slive.OperationOutput.OutputType;
  */
 class TruncateOp extends Operation {
 
-  private static final Log LOG = LogFactory.getLog(TruncateOp.class);
+  private static final Logger LOG = LoggerFactory.getLogger(TruncateOp.class);
 
   TruncateOp(ConfigExtractor cfg, Random rnd) {
     super(TruncateOp.class.getSimpleName(), cfg, rnd);

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/slive/WeightSelector.java

@@ -26,12 +26,12 @@ import java.util.Map;
 import java.util.Random;
 import java.util.TreeMap;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.slive.Constants.Distribution;
 import org.apache.hadoop.fs.slive.Constants.OperationType;
 import org.apache.hadoop.fs.slive.Weights.UniformWeight;
 import org.apache.hadoop.fs.slive.ObserveableOp.Observer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is the main handler that selects operations to run using the
@@ -47,7 +47,8 @@ class WeightSelector {
     Double weight(int elapsed, int duration);
   }
 
-  private static final Log LOG = LogFactory.getLog(WeightSelector.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(WeightSelector.class);
 
   private static class OperationInfo {
     Integer amountLeft;

+ 3 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBench.java

@@ -30,8 +30,6 @@ import java.util.Date;
 import java.util.Iterator;
 import java.util.StringTokenizer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
@@ -57,6 +55,8 @@ import org.apache.hadoop.mapred.Reporter;
 import org.apache.hadoop.mapred.SequenceFileInputFormat;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This program executes a specified operation that applies load to 
@@ -78,8 +78,7 @@ import org.apache.hadoop.util.ToolRunner;
  */
 
 public class NNBench extends Configured implements Tool {
-  private static final Log LOG = LogFactory.getLog(
-          "org.apache.hadoop.hdfs.NNBench");
+  private static final Logger LOG = LoggerFactory.getLogger(NNBench.class);
   
   private static String CONTROL_DIR_NAME = "control";
   private static String OUTPUT_DIR_NAME = "output";
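
Besides swapping the API, the hunk above also changes how the logger is named: the old code looked up a logger by the string "org.apache.hadoop.hdfs.NNBench" (which NNBenchWithoutMR shared, as the next file shows), while the new code derives the name from the class itself. A sketch contrasting the two lookups; the logger string mirrors the diff, the class LoggerNaming is hypothetical:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggerNaming {
      // String-based lookup: several classes can silently share one logger.
      private static final Logger SHARED =
          LoggerFactory.getLogger("org.apache.hadoop.hdfs.NNBench");
      // Class-based lookup: the name tracks the class, so per-class log
      // levels and rename refactorings stay accurate.
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggerNaming.class);

      public static void main(String[] args) {
        SHARED.info("string-named logger");
        LOG.info("per-class logger");
      }
    }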

+ 4 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/hdfs/NNBenchWithoutMR.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 import java.util.Date;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -30,6 +28,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.mapred.JobConf;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This program executes a specified operation that applies load to 
@@ -45,8 +45,8 @@ import org.apache.hadoop.mapred.JobConf;
  */
 public class NNBenchWithoutMR {
   
-  private static final Log LOG = LogFactory.getLog(
-                                            "org.apache.hadoop.hdfs.NNBench");
+  private static final Logger LOG =
+      LoggerFactory.getLogger(NNBenchWithoutMR.class);
   
  // variable initialized from command line arguments
   private static long startTime = 0;

+ 4 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java

@@ -22,8 +22,6 @@ import java.io.IOException;
 import java.util.Date;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -37,10 +35,12 @@ import org.apache.hadoop.mapred.lib.IdentityMapper;
 import org.apache.hadoop.mapred.lib.IdentityReducer;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class BigMapOutput extends Configured implements Tool {
-  public static final Log LOG =
-    LogFactory.getLog(BigMapOutput.class.getName());
+  public static final Logger LOG =
+      LoggerFactory.getLogger(BigMapOutput.class);
   private static Random random = new Random();
   public static String MIN_KEY = "mapreduce.bmo.minkey";
   public static String MIN_VALUE = "mapreduce.bmo.minvalue";

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MRBench.java

@@ -24,8 +24,6 @@ import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -33,13 +31,15 @@ import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Runs a job multiple times and takes average of all runs.
  */
 public class MRBench extends Configured implements Tool{
   
-  private static final Log LOG = LogFactory.getLog(MRBench.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MRBench.class);
   private static final String DEFAULT_INPUT_SUB = "mr_input";
   private static final String DEFAULT_OUTPUT_SUB = "mr_output";
 

+ 8 - 7
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRCluster.java

@@ -20,13 +20,13 @@ package org.apache.hadoop.mapred;
 import java.io.IOException;
 import java.util.Random;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class is an MR2 replacement for older MR1 MiniMRCluster, that was used
@@ -45,7 +45,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class MiniMRCluster {
-  private static final Log LOG = LogFactory.getLog(MiniMRCluster.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MiniMRCluster.class);
 
   private MiniMRClientCluster mrClientCluster;
 
@@ -98,7 +99,7 @@ public class MiniMRCluster {
     try {
       jobConf = new JobConf(mrClientCluster.getConfig());
     } catch (IOException e) {
-      LOG.error(e);
+      LOG.error(e.getMessage());
     }
     return jobConf;
   }
@@ -108,7 +109,7 @@ public class MiniMRCluster {
     try {
       jobConf = new JobConf(mrClientCluster.getConfig());
     } catch (IOException e) {
-      LOG.error(e);
+      LOG.error(e.getMessage());
     }
     return jobConf;
   }
@@ -224,7 +225,7 @@ public class MiniMRCluster {
     try {
       jobConf = new JobConf(mrClientCluster.getConfig());
     } catch (IOException e) {
-      LOG.error(e);
+      LOG.error(e.getMessage());
     }
     return jobConf;
   }
@@ -266,7 +267,7 @@ public class MiniMRCluster {
     try {
       mrClientCluster.stop();
     } catch (IOException e) {
-      LOG.error(e);
+      LOG.error(e.getMessage());
     }
   }
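
The LOG.error(e) calls rewritten above compiled under commons-logging, whose error(Object) stringifies a Throwable, but SLF4J has no such overload, so the diff substitutes e.getMessage(). Note that this drops the stack trace; where the trace matters, SLF4J's two-argument overload preserves it. A sketch of both variants, assuming a hypothetical ErrorLogging class and message text:

    import java.io.IOException;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class ErrorLogging {
      private static final Logger LOG =
          LoggerFactory.getLogger(ErrorLogging.class);

      public static void main(String[] args) {
        try {
          throw new IOException("simulated failure");
        } catch (IOException e) {
          LOG.error(e.getMessage());            // as in the diff: message only
          LOG.error("config reload failed", e); // alternative: keeps the trace
        }
      }
    }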
 

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/MiniMRYarnClusterAdapter.java

@@ -18,13 +18,13 @@
 
 package org.apache.hadoop.mapred;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.service.Service.STATE;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * An adapter for MiniMRYarnCluster providing a MiniMRClientCluster interface.
@@ -34,7 +34,8 @@ public class MiniMRYarnClusterAdapter implements MiniMRClientCluster {
 
   private MiniMRYarnCluster miniMRYarnCluster;
 
-  private static final Log LOG = LogFactory.getLog(MiniMRYarnClusterAdapter.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(MiniMRYarnClusterAdapter.class);
 
   public MiniMRYarnClusterAdapter(MiniMRYarnCluster miniMRYarnCluster) {
     this.miniMRYarnCluster = miniMRYarnCluster;

+ 7 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/ReliabilityTest.java

@@ -29,8 +29,6 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.StringTokenizer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -41,6 +39,8 @@ import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This class tests reliability of the framework in the face of failures of
@@ -73,7 +73,8 @@ import org.apache.hadoop.util.ToolRunner;
 public class ReliabilityTest extends Configured implements Tool {
 
   private String dir;
-  private static final Log LOG = LogFactory.getLog(ReliabilityTest.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ReliabilityTest.class);
 
   private void displayUsage() {
     LOG.info("This must be run in only the distributed mode " +
@@ -207,7 +208,7 @@ public class ReliabilityTest extends Configured implements Tool {
               args);
           checkJobExitStatus(status, jobClass);
         } catch (Exception e) {
-          LOG.fatal("JOB " + jobClass + " failed to run");
+          LOG.error("JOB " + jobClass + " failed to run");
           System.exit(-1);
         }
       }
@@ -325,7 +326,7 @@ public class ReliabilityTest extends Configured implements Tool {
           killed = true;
           return;
         } catch (Exception e) {
-          LOG.fatal(StringUtils.stringifyException(e));
+          LOG.error(StringUtils.stringifyException(e));
         }
       }
     }
@@ -495,7 +496,7 @@ public class ReliabilityTest extends Configured implements Tool {
         } catch (InterruptedException ie) {
           killed = true;
         } catch (Exception e) {
-          LOG.fatal(StringUtils.stringifyException(e));
+          LOG.error(StringUtils.stringifyException(e));
         }
       }
     }
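
The fatal-to-error rewrites above are likewise dictated by the API: SLF4J deliberately defines only trace/debug/info/warn/error and has no FATAL level, so commons-logging fatal() calls have no direct counterpart. A minimal sketch of the mapping; the Marker variant is an optional refinement for preserving the "fatal" intent, not something this diff uses:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.slf4j.Marker;
    import org.slf4j.MarkerFactory;

    public class FatalMapping {
      private static final Logger LOG =
          LoggerFactory.getLogger(FatalMapping.class);
      private static final Marker FATAL = MarkerFactory.getMarker("FATAL");

      public static void main(String[] args) {
        // commons-logging: LOG.fatal("JOB failed to run");
        LOG.error("JOB failed to run");        // plain mapping, as in the diff
        LOG.error(FATAL, "JOB failed to run"); // marker records the severity
      }
    }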

+ 4 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestBadRecords.java

@@ -30,8 +30,6 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.StringTokenizer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.LongWritable;
@@ -41,6 +39,8 @@ import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.junit.Ignore;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
@@ -48,8 +48,8 @@ import static org.junit.Assert.assertNotNull;
 @Ignore
 public class TestBadRecords extends ClusterMapReduceTestCase {
   
-  private static final Log LOG = 
-    LogFactory.getLog(TestBadRecords.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestBadRecords.class);
   
   private static final List<String> MAPPER_BAD_RECORDS = 
     Arrays.asList("hello01","hello04","hello05");

+ 4 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java

@@ -24,8 +24,6 @@ import java.net.InetSocketAddress;
 import java.net.UnknownHostException;
 import java.util.Iterator;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.mapreduce.Cluster;
@@ -144,6 +142,8 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public class TestClientRedirect {
 
@@ -151,7 +151,8 @@ public class TestClientRedirect {
     DefaultMetricsSystem.setMiniClusterMode(true);
   }
 
-  private static final Log LOG = LogFactory.getLog(TestClientRedirect.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestClientRedirect.class);
   private static final String RMADDRESS = "0.0.0.0:8054";
   private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
 

+ 5 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestCombineFileInputFormat.java

@@ -26,16 +26,15 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.mapred.lib.CombineFileInputFormat;
 import org.apache.hadoop.mapred.lib.CombineFileSplit;
 import org.apache.hadoop.mapred.lib.CombineFileRecordReader;
-
 import org.junit.Test;
-import static org.junit.Assert.*;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import static org.junit.Assert.*;
 
 public class TestCombineFileInputFormat {
-  private static final Log LOG =
-    LogFactory.getLog(TestCombineFileInputFormat.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestCombineFileInputFormat.class);
   
   private static JobConf defaultConf = new JobConf();
   private static FileSystem localFs = null; 

Some files were not shown due to the large number of changes