
Merge branch 'trunk' into HDFS-7240

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

Added the following code in:
hadoop/ozone/container/common/impl/ContainerManagerImpl.java
  @Override
  public void readLockInterruptibly() throws InterruptedException {
    this.lock.readLock().lockInterruptibly();
  }

and manually updated the version in
hadoop-tools/hadoop-ozone/pom.xml
to
<version>3.2.0-SNAPSHOT</version>
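
For context, a minimal caller-side sketch of how the new interruptible read lock would typically be used. This is illustrative only and not part of the commit; it assumes the same interface also exposes a matching readUnlock():

  // Hypothetical usage sketch -- not from this commit.
  containerManager.readLockInterruptibly();   // may throw InterruptedException
  try {
    // read container state while holding the read lock
  } finally {
    containerManager.readUnlock();            // assumed counterpart to the read lock
  }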
Anu Engineer 7 years ago
parent
commit
479197872b
100 changed files with 2,962 additions and 899 deletions
   #    +added -removed  path
   1.   +2 -2      hadoop-assemblies/pom.xml
   2.   +1 -1      hadoop-build-tools/pom.xml
   3.   +2 -2      hadoop-client-modules/hadoop-client-api/pom.xml
   4.   +2 -2      hadoop-client-modules/hadoop-client-check-invariants/pom.xml
   5.   +2 -2      hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
   6.   +2 -2      hadoop-client-modules/hadoop-client-integration-tests/pom.xml
   7.   +2 -2      hadoop-client-modules/hadoop-client-minicluster/pom.xml
   8.   +2 -2      hadoop-client-modules/hadoop-client-runtime/pom.xml
   9.   +2 -2      hadoop-client-modules/hadoop-client/pom.xml
  10.   +1 -1      hadoop-client-modules/pom.xml
  11.   +2 -2      hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
  12.   +2 -2      hadoop-cloud-storage-project/pom.xml
  13.   +2 -2      hadoop-common-project/hadoop-annotations/pom.xml
  14.   +2 -2      hadoop-common-project/hadoop-auth-examples/pom.xml
  15.   +2 -12     hadoop-common-project/hadoop-auth/pom.xml
  16.   +2 -49     hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java
  17.   +14 -27    hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
  18.   +2 -2      hadoop-common-project/hadoop-common/pom.xml
  19.   +4 -0      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  20.   +6 -1      hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd
  21.   +80 -0     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  22.   +106 -0    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageSize.java
  23.   +530 -0    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageUnit.java
  24.   +1 -1      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  25.   +251 -6    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
  26.   +1 -1      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
  27.   +12 -18    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  28.   +10 -10    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
  29.   +4 -5      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
  30.   +1 -1      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
  31.   +4 -7      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java
  32.   +19 -20    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
  33.   +6 -0      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
  34.   +8 -19     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java
  35.   +2 -4      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java
  36.   +59 -0     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CombinedIPList.java
  37.   +4 -4      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
  38.   +8 -0      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java
  39.   +69 -0     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java
  40.   +7 -6      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  41.   +6 -0      hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md
  42.   +12 -20    hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md
  43.   +76 -0     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
  44.   +277 -0    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestStorageUnit.java
  45.   +1 -1      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java
  46.   +86 -0     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java
  47.   +1 -1      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java
  48.   +1 -2      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java
  49.   +38 -2     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java
  50.   +36 -0     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestLambdaTestUtils.java
  51.   +57 -0     hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java
  52.   +102 -0    hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats
  53.   +2 -2      hadoop-common-project/hadoop-kms/pom.xml
  54.   +2 -2      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
  55.   +2 -2      hadoop-common-project/hadoop-minikdc/pom.xml
  56.   +2 -2      hadoop-common-project/hadoop-nfs/pom.xml
  57.   +2 -2      hadoop-common-project/pom.xml
  58.   +2 -2      hadoop-dist/pom.xml
  59.   +2 -2      hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
  60.   +2 -1      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java
  61.   +2 -2      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
  62.   +5 -1      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
  63.   +4 -1      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
  64.   +34 -22    hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java
  65.   +18 -1     hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java
  66.   +8 -4      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
  67.   +12 -5     hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java
  68.   +15 -1     hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
  69.   +10 -1     hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  70.   +3 -3      hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java
  71.   +2 -2      hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  72.   +2 -2      hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
  73.   +2 -2      hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
  74.   +10 -15    hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
  75.   +127 -187  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java
  76.   +154 -187  hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
  77.   +2 -2      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  78.   +7 -2      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  79.   +8 -1      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java
  80.   +143 -0    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlackListBasedTrustedChannelResolver.java
  81.   +24 -0     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java
  82.   +60 -0     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java
  83.   +59 -1     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java
  84.   +2 -2      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/InterQJournalProtocol.java
  85.   +5 -6      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/InterQJournalProtocolServerSideTranslatorPB.java
  86.   +6 -7      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/InterQJournalProtocolTranslatorPB.java
  87.   +4 -5      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
  88.   +87 -42    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
  89.   +10 -11    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
  90.   +4 -4      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java
  91.   +38 -15    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java
  92.   +6 -0      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java
  93.   +1 -2      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
  94.   +1 -1      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java
  95.   +2 -4      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
  96.   +1 -1      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java
  97.   +37 -38    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
  98.   +14 -1     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
  99.   +43 -45    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
 100.   +43 -13    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

+ 2 - 2
hadoop-assemblies/pom.xml

@@ -23,11 +23,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 

+ 1 - 1
hadoop-build-tools/pom.xml

@@ -18,7 +18,7 @@
   <parent>
     <artifactId>hadoop-main</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-build-tools</artifactId>

+ 2 - 2
hadoop-client-modules/hadoop-client-api/pom.xml

@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-   <version>3.1.0-SNAPSHOT</version>
+   <version>3.2.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
 </parent>
   <artifactId>hadoop-client-api</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

+ 2 - 2
hadoop-client-modules/hadoop-client-check-invariants/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-invariants</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <description>

+ 2 - 2
hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-test-invariants</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <description>

+ 2 - 2
hadoop-client-modules/hadoop-client-integration-tests/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-integration-tests</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
 
   <description>Checks that we can use the generated artifacts</description>
   <name>Apache Hadoop Client Packaging Integration Tests</name>

+ 2 - 2
hadoop-client-modules/hadoop-client-minicluster/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-minicluster</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Minicluster for Clients</description>

+ 2 - 2
hadoop-client-modules/hadoop-client-runtime/pom.xml

@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-   <version>3.1.0-SNAPSHOT</version>
+   <version>3.2.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
 </parent>
   <artifactId>hadoop-client-runtime</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

+ 2 - 2
hadoop-client-modules/hadoop-client/pom.xml

@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-   <version>3.1.0-SNAPSHOT</version>
+   <version>3.2.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
 </parent>
   <artifactId>hadoop-client</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
 
   <description>Apache Hadoop Client aggregation pom with dependencies exposed</description>
   <name>Apache Hadoop Client Aggregator</name>

+ 1 - 1
hadoop-client-modules/pom.xml

@@ -18,7 +18,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-modules</artifactId>

+ 2 - 2
hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Cloud Storage</description>

+ 2 - 2
hadoop-cloud-storage-project/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage-project</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Cloud Storage Project</description>
   <name>Apache Hadoop Cloud Storage Project</name>
   <packaging>pom</packaging>

+ 2 - 2
hadoop-common-project/hadoop-annotations/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-annotations</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>

+ 2 - 12
hadoop-common-project/hadoop-auth/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>
@@ -65,16 +65,6 @@
       <groupId>org.eclipse.jetty</groupId>
       <artifactId>jetty-servlet</artifactId>
       <scope>test</scope>
-    </dependency>
-     <dependency>
-      <groupId>org.apache.tomcat.embed</groupId>
-      <artifactId>tomcat-embed-core</artifactId>
-      <scope>test</scope>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.tomcat.embed</groupId>
-      <artifactId>tomcat-embed-logging-juli</artifactId>
-      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>javax.servlet</groupId>

+ 2 - 49
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java

@@ -13,9 +13,6 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
-import org.apache.catalina.deploy.FilterDef;
-import org.apache.catalina.deploy.FilterMap;
-import org.apache.catalina.startup.Tomcat;
 import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
 import org.apache.http.HttpResponse;
 import org.apache.http.auth.AuthScope;
@@ -45,7 +42,6 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import java.io.BufferedReader;
 import java.io.ByteArrayInputStream;
-import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -65,18 +61,12 @@ public class AuthenticatorTestCase {
   private Server server;
   private String host = null;
   private int port = -1;
-  private boolean useTomcat = false;
-  private Tomcat tomcat = null;
   ServletContextHandler context;
 
   private static Properties authenticatorConfig;
 
   public AuthenticatorTestCase() {}
 
-  public AuthenticatorTestCase(boolean useTomcat) {
-    this.useTomcat = useTomcat;
-  }
-
   protected static void setAuthenticationHandlerConfig(Properties config) {
     authenticatorConfig = config;
   }
@@ -120,8 +110,7 @@ public class AuthenticatorTestCase {
   }
 
   protected void start() throws Exception {
-    if (useTomcat) startTomcat();
-    else startJetty();
+    startJetty();
   }
 
   protected void startJetty() throws Exception {
@@ -142,32 +131,8 @@ public class AuthenticatorTestCase {
     System.out.println("Running embedded servlet container at: http://" + host + ":" + port);
   }
 
-  protected void startTomcat() throws Exception {
-    tomcat = new Tomcat();
-    File base = new File(System.getProperty("java.io.tmpdir"));
-    org.apache.catalina.Context ctx =
-      tomcat.addContext("/foo",base.getAbsolutePath());
-    FilterDef fd = new FilterDef();
-    fd.setFilterClass(TestFilter.class.getName());
-    fd.setFilterName("TestFilter");
-    FilterMap fm = new FilterMap();
-    fm.setFilterName("TestFilter");
-    fm.addURLPattern("/*");
-    fm.addServletName("/bar");
-    ctx.addFilterDef(fd);
-    ctx.addFilterMap(fm);
-    tomcat.addServlet(ctx, "/bar", TestServlet.class.getName());
-    ctx.addServletMapping("/bar", "/bar");
-    host = "localhost";
-    port = getLocalPort();
-    tomcat.setHostname(host);
-    tomcat.setPort(port);
-    tomcat.start();
-  }
-
   protected void stop() throws Exception {
-    if (useTomcat) stopTomcat();
-    else stopJetty();
+    stopJetty();
   }
 
   protected void stopJetty() throws Exception {
@@ -182,18 +147,6 @@ public class AuthenticatorTestCase {
     }
   }
 
-  protected void stopTomcat() throws Exception {
-    try {
-      tomcat.stop();
-    } catch (Exception e) {
-    }
-
-    try {
-      tomcat.destroy();
-    } catch (Exception e) {
-    }
-  }
-
   protected String getBaseURL() {
     return "http://" + host + ":" + port + "/foo/bar";
   }

+ 14 - 27
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java

@@ -28,33 +28,20 @@ import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHand
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.junit.Assert;
 import org.junit.Before;
-import org.junit.runners.Parameterized;
-import org.junit.runner.RunWith;
 import org.junit.Test;
 
 import java.io.File;
 import java.net.HttpURLConnection;
 import java.net.URL;
-import java.util.Arrays;
-import java.util.Collection;
 import java.util.Properties;
 import java.util.concurrent.Callable;
 
-@RunWith(Parameterized.class)
+/**
+ * Test class for {@link KerberosAuthenticator}.
+ */
 public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
-  private boolean useTomcat = false;
-
-  public TestKerberosAuthenticator(boolean useTomcat) {
-    this.useTomcat = useTomcat;
-  }
-
-  @Parameterized.Parameters
-  public static Collection booleans() {
-    return Arrays.asList(new Object[][] {
-      { false },
-      { true }
-    });
+  public TestKerberosAuthenticator() {
   }
 
   @Before
@@ -93,7 +80,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testFallbacktoPseudoAuthenticator() throws Exception {
-    AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
     Properties props = new Properties();
     props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
     props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false");
@@ -103,7 +90,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testFallbacktoPseudoAuthenticatorAnonymous() throws Exception {
-    AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
     Properties props = new Properties();
     props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple");
     props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
@@ -113,7 +100,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testNotAuthenticated() throws Exception {
-    AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
     AuthenticatorTestCase.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration());
     auth.start();
     try {
@@ -129,7 +116,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testAuthentication() throws Exception {
-    final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase();
     AuthenticatorTestCase.setAuthenticationHandlerConfig(
             getAuthenticationHandlerConfiguration());
     KerberosTestUtils.doAsClient(new Callable<Void>() {
@@ -143,7 +130,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testAuthenticationPost() throws Exception {
-    final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase();
     AuthenticatorTestCase.setAuthenticationHandlerConfig(
             getAuthenticationHandlerConfiguration());
     KerberosTestUtils.doAsClient(new Callable<Void>() {
@@ -157,7 +144,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testAuthenticationHttpClient() throws Exception {
-    final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase();
     AuthenticatorTestCase.setAuthenticationHandlerConfig(
             getAuthenticationHandlerConfiguration());
     KerberosTestUtils.doAsClient(new Callable<Void>() {
@@ -171,7 +158,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout=60000)
   public void testAuthenticationHttpClientPost() throws Exception {
-    final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase();
     AuthenticatorTestCase.setAuthenticationHandlerConfig(
             getAuthenticationHandlerConfiguration());
     KerberosTestUtils.doAsClient(new Callable<Void>() {
@@ -185,7 +172,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout = 60000)
   public void testNotAuthenticatedWithMultiAuthHandler() throws Exception {
-    AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    AuthenticatorTestCase auth = new AuthenticatorTestCase();
     AuthenticatorTestCase
         .setAuthenticationHandlerConfig(getMultiAuthHandlerConfiguration());
     auth.start();
@@ -204,7 +191,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
 
 
   @Test(timeout = 60000)
   public void testAuthenticationWithMultiAuthHandler() throws Exception {
-    final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase();
     AuthenticatorTestCase
         .setAuthenticationHandlerConfig(getMultiAuthHandlerConfiguration());
     KerberosTestUtils.doAsClient(new Callable<Void>() {
@@ -219,7 +206,7 @@ public class TestKerberosAuthenticator extends KerberosSecurityTestcase {
   @Test(timeout = 60000)
   public void testAuthenticationHttpClientPostWithMultiAuthHandler()
       throws Exception {
-    final AuthenticatorTestCase auth = new AuthenticatorTestCase(useTomcat);
+    final AuthenticatorTestCase auth = new AuthenticatorTestCase();
     AuthenticatorTestCase
         .setAuthenticationHandlerConfig(getMultiAuthHandlerConfiguration());
     KerberosTestUtils.doAsClient(new Callable<Void>() {

+ 2 - 2
hadoop-common-project/hadoop-common/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-common</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>

+ 4 - 0
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -45,6 +45,7 @@ function hadoop_usage
   hadoop_add_subcommand "key" client "manage keys via the KeyProvider"
   hadoop_add_subcommand "trace" client "view and modify Hadoop tracing settings"
   hadoop_add_subcommand "version" client "print the version"
+  hadoop_add_subcommand "kdiag" client "Diagnose Kerberos Problems"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
 }
 
@@ -148,6 +149,9 @@ function hadoopcmd_case
     kerbname)
       HADOOP_CLASSNAME=org.apache.hadoop.security.HadoopKerberosName
     ;;
+    kdiag)
+      HADOOP_CLASSNAME=org.apache.hadoop.security.KDiag
+    ;;
     key)
       HADOOP_CLASSNAME=org.apache.hadoop.crypto.key.KeyShell
     ;;

+ 6 - 1
hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd

@@ -149,7 +149,7 @@ call :updatepath %HADOOP_BIN_PATH%
     exit /b
   )
 
-  set corecommands=fs version jar checknative conftest distch distcp daemonlog archive classpath credential kerbname key trace
+  set corecommands=fs version jar checknative conftest distch distcp daemonlog archive classpath credential kerbname key trace kdiag
   for %%i in ( %corecommands% ) do (
     if %hadoop-command% == %%i set corecommand=true
   )
@@ -231,6 +231,10 @@ call :updatepath %HADOOP_BIN_PATH%
   set CLASS=org.apache.hadoop.security.HadoopKerberosName
   goto :eof
 
+:kdiag
+  set CLASS=org.apache.hadoop.security.KDiag
+  goto :eof
+
 :key
   set CLASS=org.apache.hadoop.crypto.key.KeyShell
   goto :eof
@@ -307,6 +311,7 @@ call :updatepath %HADOOP_BIN_PATH%
   @echo   credential           interact with credential providers
   @echo   jnipath              prints the java.library.path
   @echo   kerbname             show auth_to_local principal conversion
+  @echo   kdiag                diagnose kerberos problems
   @echo   key                  manage keys via the KeyProvider
   @echo   trace                view and modify Hadoop tracing settings
   @echo   daemonlog            get/set the log level for each daemon

+ 80 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -109,6 +109,9 @@ import org.w3c.dom.Element;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 
+import static org.apache.commons.lang3.StringUtils.isBlank;
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+
 /**
  * Provides access to configuration parameters.
  *
@@ -1819,6 +1822,83 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     }
     return durations;
   }
+  /**
+   * Gets the Storage Size from the config, or returns the defaultValue. The
+   * unit of return value is specified in target unit.
+   *
+   * @param name - Key Name
+   * @param defaultValue - Default Value -- e.g. 100MB
+   * @param targetUnit - The units that we want result to be in.
+   * @return double -- formatted in target Units
+   */
+  public double getStorageSize(String name, String defaultValue,
+      StorageUnit targetUnit) {
+    Preconditions.checkState(isNotBlank(name), "Key cannot be blank.");
+    String vString = get(name);
+    if (isBlank(vString)) {
+      vString = defaultValue;
+    }
+
+    // Please note: There is a bit of subtlety here. If the user specifies
+    // the default unit as "1GB", but the requested unit is MB, we will return
+    // the format in MB even though the default string is specified in GB.
+
+    // Converts a string like "1GB" to the unit specified in targetUnit.
+
+    StorageSize measure = StorageSize.parse(vString);
+    return convertStorageUnit(measure.getValue(), measure.getUnit(),
+        targetUnit);
+  }
+
+  /**
+   * Gets storage size from a config file.
+   *
+   * @param name - Key to read.
+   * @param defaultValue - The default value to return in case the key is
+   * not present.
+   * @param targetUnit - The Storage unit that should be used
+   * for the return value.
+   * @return - double value in the Storage Unit specified.
+   */
+  public double getStorageSize(String name, double defaultValue,
+      StorageUnit targetUnit) {
+    Preconditions.checkNotNull(targetUnit, "Conversion unit cannot be null.");
+    Preconditions.checkState(isNotBlank(name), "Name cannot be blank.");
+    String vString = get(name);
+    if (isBlank(vString)) {
+      return targetUnit.getDefault(defaultValue);
+    }
+
+    StorageSize measure = StorageSize.parse(vString);
+    return convertStorageUnit(measure.getValue(), measure.getUnit(),
+        targetUnit);
+
+  }
+
+  /**
+   * Sets Storage Size for the specified key.
+   *
+   * @param name - Key to set.
+   * @param value - The numeric value to set.
+   * @param unit - Storage Unit to be used.
+   */
+  public void setStorageSize(String name, double value, StorageUnit unit) {
+    set(name, value + unit.getShortName());
+  }
+
+  /**
+   * convert the value from one storage unit to another.
+   *
+   * @param value - value
+   * @param sourceUnit - Source unit to convert from
+   * @param targetUnit - target unit.
+   * @return double.
+   */
+  private double convertStorageUnit(double value, StorageUnit sourceUnit,
+      StorageUnit targetUnit) {
+    double byteValue = sourceUnit.toBytes(value);
+    return targetUnit.fromBytes(byteValue);
+  }
 
   /**
    * Get the value of the <code>name</code> property as a <code>Pattern</code>.
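
A minimal usage sketch for the storage-size accessors added above (the key name is illustrative only, not a real configuration key):

  Configuration conf = new Configuration();
  // Store 2 GB under an illustrative key; setStorageSize persists it as "2.0gb".
  conf.setStorageSize("dfs.example.buffer.size", 2, StorageUnit.GB);
  // Read it back in megabytes; the stored gigabyte value is converted to the requested unit.
  double sizeInMb = conf.getStorageSize("dfs.example.buffer.size", "1GB", StorageUnit.MB);
  // sizeInMb == 2048.0; if the key were absent, the "1GB" default would yield 1024.0.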

+ 106 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageSize.java

@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.conf;
+
+import java.util.Locale;
+
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+
+/**
+ * A class that contains the numeric value and the unit of measure.
+ */
+public class StorageSize {
+  private final StorageUnit unit;
+  private final double value;
+
+  /**
+   * Constructs a Storage Measure, which contains the value and the unit of
+   * measure.
+   *
+   * @param unit - Unit of Measure
+   * @param value - Numeric value.
+   */
+  public StorageSize(StorageUnit unit, double value) {
+    this.unit = unit;
+    this.value = value;
+  }
+
+  private static void checkState(boolean state, String errorString){
+    if(!state) {
+      throw new IllegalStateException(errorString);
+    }
+  }
+
+  public static StorageSize parse(String value) {
+    checkState(isNotBlank(value), "value cannot be blank");
+    String sanitizedValue = value.trim().toLowerCase(Locale.ENGLISH);
+    StorageUnit parsedUnit = null;
+    for (StorageUnit unit : StorageUnit.values()) {
+      if (sanitizedValue.endsWith(unit.getShortName()) ||
+          sanitizedValue.endsWith(unit.getLongName()) ||
+          sanitizedValue.endsWith(unit.getSuffixChar())) {
+        parsedUnit = unit;
+        break;
+      }
+    }
+
+    if (parsedUnit == null) {
+      throw new IllegalArgumentException(value + " is not in expected format." +
+          "Expected format is <number><unit>. e.g. 1000MB");
+    }
+
+
+    String suffix = "";
+    boolean found = false;
+
+    // We are trying to get the longest match first, so the order of
+    // matching is getLongName, getShortName and then getSuffixChar.
+    if (!found && sanitizedValue.endsWith(parsedUnit.getLongName())) {
+      found = true;
+      suffix = parsedUnit.getLongName();
+    }
+
+    if (!found && sanitizedValue.endsWith(parsedUnit.getShortName())) {
+      found = true;
+      suffix = parsedUnit.getShortName();
+    }
+
+    if (!found && sanitizedValue.endsWith(parsedUnit.getSuffixChar())) {
+      found = true;
+      suffix = parsedUnit.getSuffixChar();
+    }
+
+    checkState(found, "Something is wrong, we have to find a " +
+        "match. Internal error.");
+
+    String valString =
+        sanitizedValue.substring(0, value.length() - suffix.length());
+    return new StorageSize(parsedUnit, Double.parseDouble(valString));
+
+  }
+
+  public StorageUnit getUnit() {
+    return unit;
+  }
+
+  public double getValue() {
+    return value;
+  }
+
+}
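
Illustrative examples of the parsing behaviour implemented above (expected results derived by reading the code, not taken from a test run):

  StorageSize s1 = StorageSize.parse("1000MB");  // unit = StorageUnit.MB, value = 1000.0
  StorageSize s2 = StorageSize.parse("2g");      // one-character suffix "g" -> StorageUnit.GB, value = 2.0
  double bytes = s1.getUnit().toBytes(s1.getValue());  // 1000 * 1024 * 1024 = 1.048576E9 bytes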

+ 530 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/StorageUnit.java

@@ -0,0 +1,530 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.conf;
+
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+
+/**
+ * Class that maintains different forms of Storage Units.
+ */
+public enum StorageUnit {
+  /*
+    We rely on BYTES being the last to get longest matching short names first.
+    The short name of bytes is b and it will match with other longer names.
+
+    if we change this order, the corresponding code in
+    Configuration#parseStorageUnit needs to be changed too, since values()
+    call returns the Enums in declared order and we depend on it.
+   */
+
+  EB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, EXABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, EXABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, EXABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return multiply(value, EXABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return multiply(value, EXABYTES / TERABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return multiply(value, EXABYTES / PETABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return value;
+    }
+
+    @Override
+    public String getLongName() {
+      return "exabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "eb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "e";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toEBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, EXABYTES);
+    }
+  },
+  PB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, PETABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, PETABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, PETABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return multiply(value, PETABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return multiply(value, PETABYTES / TERABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / PETABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "petabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "pb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "p";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toPBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, PETABYTES);
+    }
+  },
+  TB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, TERABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, TERABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, TERABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return multiply(value, TERABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / TERABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / TERABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "terabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "tb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "t";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toTBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, TERABYTES);
+    }
+  },
+  GB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, GIGABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, GIGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, GIGABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / GIGABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "gigabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "gb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "g";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toGBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, GIGABYTES);
+    }
+  },
+  MB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, MEGABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, MEGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return divide(value, GIGABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / MEGABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "megabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "mb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "m";
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, MEGABYTES);
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toMBs(value);
+    }
+  },
+  KB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, KILOBYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return divide(value, MEGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return divide(value, GIGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / KILOBYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "kilobytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "kb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "k";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toKBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, KILOBYTES);
+    }
+  },
+  BYTES {
+    @Override
+    public double toBytes(double value) {
+      return value;
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return divide(value, KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return divide(value, MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return divide(value, GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "bytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "b";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "b";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toBytes(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return value;
+    }
+  };
+
+  private static final double BYTE = 1L;
+  private static final double KILOBYTES = BYTE * 1024L;
+  private static final double MEGABYTES = KILOBYTES * 1024L;
+  private static final double GIGABYTES = MEGABYTES * 1024L;
+  private static final double TERABYTES = GIGABYTES * 1024L;
+  private static final double PETABYTES = TERABYTES * 1024L;
+  private static final double EXABYTES = PETABYTES * 1024L;
+  private static final int PRECISION = 4;
+
+  /**
+   * Using BigDecimal to avoid issues with overflow and underflow.
+   *
+   * @param value - value
+   * @param divisor - divisor.
+   * @return -- returns a double that represents this value
+   */
+  private static double divide(double value, double divisor) {
+    BigDecimal val = new BigDecimal(value);
+    BigDecimal bDivisor = new BigDecimal(divisor);
+    return val.divide(bDivisor).setScale(PRECISION, RoundingMode.HALF_UP)
+        .doubleValue();
+  }
+
+  /**
+   * Using BigDecimal to avoid issues with overflow and underflow.
+   *
+   * @param first - First Num.
+   * @param second - Second Num.
+   * @return Returns a double
+   */
+  private static double multiply(double first, double second) {
+    BigDecimal firstVal = new BigDecimal(first);
+    BigDecimal secondVal = new BigDecimal(second);
+    return firstVal.multiply(secondVal)
+        .setScale(PRECISION, RoundingMode.HALF_UP).doubleValue();
+  }
+
+  public abstract double toBytes(double value);
+
+  public abstract double toKBs(double value);
+
+  public abstract double toMBs(double value);
+
+  public abstract double toGBs(double value);
+
+  public abstract double toTBs(double value);
+
+  public abstract double toPBs(double value);
+
+  public abstract double toEBs(double value);
+
+  public abstract String getLongName();
+
+  public abstract String getShortName();
+
+  public abstract String getSuffixChar();
+
+  public abstract double getDefault(double value);
+
+  public abstract double fromBytes(double value);
+
+  public String toString() {
+    return getLongName();
+  }
+
+}
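A minimal usage sketch for the conversions above (it assumes only the StorageUnit enum added in this file; all units are powers of 1024, and results are rounded by the divide()/multiply() helpers):

  import org.apache.hadoop.conf.StorageUnit;

  public class StorageUnitExample {
    public static void main(String[] args) {
      // 1 GB expressed in megabytes -> 1024.0
      System.out.println(StorageUnit.GB.toMBs(1));
      // 2048 bytes expressed in kilobytes -> 2.0
      System.out.println(StorageUnit.BYTES.toKBs(2048));
      // 1 TB expressed in bytes -> 1.099511627776E12
      System.out.println(StorageUnit.TB.toBytes(1));
    }
  }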

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -98,7 +98,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
  /**
   * CallQueue related settings. These are not used directly, but rather
   * combined with a namespace and port. For instance:
-   * IPC_NAMESPACE + ".9820." + IPC_CALLQUEUE_IMPL_KEY
+   * IPC_NAMESPACE + ".8020." + IPC_CALLQUEUE_IMPL_KEY
   */
  public static final String IPC_NAMESPACE = "ipc";
  public static final String IPC_CALLQUEUE_IMPL_KEY = "callqueue.impl";
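As the comment above describes, these constants are only building blocks; the effective property name is assembled per namespace and port. A small sketch of that layout (the helper class and the port 8020 are illustrative only):

  import org.apache.hadoop.fs.CommonConfigurationKeys;

  public class CallQueueKeyExample {
    // Hypothetical helper, shown only to illustrate the key layout:
    // "ipc" + "." + <port> + "." + "callqueue.impl" -> "ipc.8020.callqueue.impl"
    static String callQueueImplKey(int port) {
      return CommonConfigurationKeys.IPC_NAMESPACE + "." + port + "."
          + CommonConfigurationKeys.IPC_CALLQUEUE_IMPL_KEY;
    }

    public static void main(String[] args) {
      System.out.println(callQueueImplKey(8020));
    }
  }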

+ 251 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -38,22 +38,31 @@ import org.slf4j.LoggerFactory;
 
 
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
+import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.InputStreamReader;
 import java.io.OutputStream;
 import java.net.InetAddress;
 import java.net.URI;
 import java.net.UnknownHostException;
+import java.nio.charset.Charset;
 import java.nio.file.AccessDeniedException;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.Enumeration;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 import java.util.jar.Attributes;
 import java.util.jar.JarOutputStream;
 import java.util.jar.Manifest;
@@ -63,6 +72,7 @@ import java.util.zip.GZIPInputStream;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipFile;
 import java.util.zip.ZipOutputStream;
+import java.util.zip.ZipInputStream;
 
 /**
  * A collection of file-processing util methods.
@@ -536,6 +546,22 @@ public class FileUtil {
     return makeShellPath(file, false);
   }
 
+  /**
+   * Convert an os-native filename to a path that works for the shell
+   * and avoids script injection attacks.
+   * @param file The filename to convert
+   * @return The unix pathname
+   * @throws IOException on Windows, there can be problems with the subprocess
+   */
+  public static String makeSecureShellPath(File file) throws IOException {
+    if (Shell.WINDOWS) {
+      // Currently it is never called, but it might be helpful in the future.
+      throw new UnsupportedOperationException("Not implemented for Windows");
+    } else {
+      return makeShellPath(file, false).replace("'", "'\\''");
+    }
+  }
+
   /**
    * Convert a os-native filename to a path that works for the shell.
    * @param file The filename to convert
@@ -587,6 +613,7 @@ public class FileUtil {
   }
 
   /**
    * creates zip archieve of the source dir and writes a zip file.
    *
    * @param sourceDir - The directory to zip.
@@ -648,9 +675,46 @@ public class FileUtil {
   /**
    * Given a File input it will unzip the file in a the unzip directory
    * passed as the second parameter
+   * @param inputStream The zip file as input
+   * @param toDir The unzip directory where to unzip the zip file.
+   * @throws IOException an exception occurred
+   */
+  public static void unZip(InputStream inputStream, File toDir)
+      throws IOException {
+    try (ZipInputStream zip = new ZipInputStream(inputStream)) {
+      int numOfFailedLastModifiedSet = 0;
+      for(ZipEntry entry = zip.getNextEntry();
+          entry != null;
+          entry = zip.getNextEntry()) {
+        if (!entry.isDirectory()) {
+          File file = new File(toDir, entry.getName());
+          File parent = file.getParentFile();
+          if (!parent.mkdirs() &&
+              !parent.isDirectory()) {
+            throw new IOException("Mkdirs failed to create " +
+                parent.getAbsolutePath());
+          }
+          try (OutputStream out = new FileOutputStream(file)) {
+            IOUtils.copyBytes(zip, out, BUFFER_SIZE);
+          }
+          if (!file.setLastModified(entry.getTime())) {
+            numOfFailedLastModifiedSet++;
+          }
+        }
+      }
+      if (numOfFailedLastModifiedSet > 0) {
+        LOG.warn("Could not set last modfied time for {} file(s)",
+            numOfFailedLastModifiedSet);
+      }
+    }
+  }
+
+  /**
+   * Given a File input it will unzip it in the unzip directory
+   * passed as the second parameter.
    * @param inFile The zip file as input
    * @param unzipDir The unzip directory where to unzip the zip file.
-   * @throws IOException
+   * @throws IOException An I/O exception has occurred
    */
   public static void unZip(File inFile, File unzipDir) throws IOException {
     Enumeration<? extends ZipEntry> entries;
@@ -690,6 +754,138 @@ public class FileUtil {
     }
   }
 
 
+  /**
+   * Run a command and send the contents of an input stream to it.
+   * @param inputStream Input stream to forward to the shell command
+   * @param command shell command to run
+   * @throws IOException read or write failed
+   * @throws InterruptedException command interrupted
+   * @throws ExecutionException task submit failed
+   */
+  private static void runCommandOnStream(
+      InputStream inputStream, String command)
+      throws IOException, InterruptedException, ExecutionException {
+    ExecutorService executor = null;
+    ProcessBuilder builder = new ProcessBuilder();
+    builder.command(
+        Shell.WINDOWS ? "cmd" : "bash",
+        Shell.WINDOWS ? "/c" : "-c",
+        command);
+    Process process = builder.start();
+    int exitCode;
+    try {
+      // Consume stdout and stderr, to avoid blocking the command
+      executor = Executors.newFixedThreadPool(2);
+      Future output = executor.submit(() -> {
+        try {
+          // Read until the output stream receives an EOF and closed.
+          if (LOG.isDebugEnabled()) {
+            // Log directly to avoid out of memory errors
+            try (BufferedReader reader =
+                     new BufferedReader(
+                         new InputStreamReader(process.getInputStream(),
+                             Charset.forName("UTF-8")))) {
+              String line;
+              while((line = reader.readLine()) != null) {
+                LOG.debug(line);
+              }
+            }
+          } else {
+            org.apache.commons.io.IOUtils.copy(
+                process.getInputStream(),
+                new IOUtils.NullOutputStream());
+          }
+        } catch (IOException e) {
+          LOG.debug(e.getMessage());
+        }
+      });
+      Future error = executor.submit(() -> {
+        try {
+          // Read until the error stream receives an EOF and closed.
+          if (LOG.isDebugEnabled()) {
+            // Log directly to avoid out of memory errors
+            try (BufferedReader reader =
+                     new BufferedReader(
+                         new InputStreamReader(process.getErrorStream(),
+                             Charset.forName("UTF-8")))) {
+              String line;
+              while((line = reader.readLine()) != null) {
+                LOG.debug(line);
+              }
+            }
+          } else {
+            org.apache.commons.io.IOUtils.copy(
+                process.getErrorStream(),
+                new IOUtils.NullOutputStream());
+          }
+        } catch (IOException e) {
+          LOG.debug(e.getMessage());
+        }
+      });
+
+      // Pass the input stream to the command to process
+      try {
+        org.apache.commons.io.IOUtils.copy(
+            inputStream, process.getOutputStream());
+      } finally {
+        process.getOutputStream().close();
+      }
+
+      // Wait for both stdout and stderr futures to finish
+      error.get();
+      output.get();
+    } finally {
+      // Clean up the threads
+      if (executor != null) {
+        executor.shutdown();
+      }
+      // Wait to avoid leaking the child process
+      exitCode = process.waitFor();
+    }
+
+    if (exitCode != 0) {
+      throw new IOException(
+          String.format(
+              "Error executing command. %s " +
+                  "Process exited with exit code %d.",
+              command, exitCode));
+    }
+  }
+
+  /**
+   * Given a Tar File as input it will untar the file in the untar directory
+   * passed as the second parameter.
+   *
+   * This utility will untar ".tar" files and ".tar.gz","tgz" files.
+   *
+   * @param inputStream The tar file as input.
+   * @param untarDir The untar directory where to untar the tar file.
+   * @param gzipped The input stream is gzipped
+   *                TODO Use magic number and PushbackInputStream to identify
+   * @throws IOException an exception occurred
+   * @throws InterruptedException command interrupted
+   * @throws ExecutionException task submit failed
+   */
+  public static void unTar(InputStream inputStream, File untarDir,
+                           boolean gzipped)
+      throws IOException, InterruptedException, ExecutionException {
+    if (!untarDir.mkdirs()) {
+      if (!untarDir.isDirectory()) {
+        throw new IOException("Mkdirs failed to create " + untarDir);
+      }
+    }
+
+    if(Shell.WINDOWS) {
+      // Tar is not native to Windows. Use simple Java based implementation for
+      // tests and simple tar archives
+      unTarUsingJava(inputStream, untarDir, gzipped);
+    } else {
+      // spawn tar utility to untar archive for full fledged unix behavior such
+      // as resolving symlinks in tar archives
+      unTarUsingTar(inputStream, untarDir, gzipped);
+    }
+  }
+
   /**
    * Given a Tar File as input it will untar the file in a the untar directory
    * passed as the second parameter
@@ -720,23 +916,41 @@ public class FileUtil {
     }
   }
 
 
+  private static void unTarUsingTar(InputStream inputStream, File untarDir,
+                                    boolean gzipped)
+      throws IOException, InterruptedException, ExecutionException {
+    StringBuilder untarCommand = new StringBuilder();
+    if (gzipped) {
+      untarCommand.append("gzip -dc | (");
+    }
+    untarCommand.append("cd '");
+    untarCommand.append(FileUtil.makeSecureShellPath(untarDir));
+    untarCommand.append("' && ");
+    untarCommand.append("tar -x ");
+
+    if (gzipped) {
+      untarCommand.append(")");
+    }
+    runCommandOnStream(inputStream, untarCommand.toString());
+  }
+
   private static void unTarUsingTar(File inFile, File untarDir,
       boolean gzipped) throws IOException {
     StringBuffer untarCommand = new StringBuffer();
     if (gzipped) {
       untarCommand.append(" gzip -dc '");
-      untarCommand.append(FileUtil.makeShellPath(inFile));
+      untarCommand.append(FileUtil.makeSecureShellPath(inFile));
       untarCommand.append("' | (");
     }
     untarCommand.append("cd '");
-    untarCommand.append(FileUtil.makeShellPath(untarDir));
-    untarCommand.append("' ; ");
+    untarCommand.append(FileUtil.makeSecureShellPath(untarDir));
+    untarCommand.append("' && ");
     untarCommand.append("tar -xf ");
 
     if (gzipped) {
       untarCommand.append(" -)");
     } else {
-      untarCommand.append(FileUtil.makeShellPath(inFile));
+      untarCommand.append(FileUtil.makeSecureShellPath(inFile));
     }
     String[] shellCmd = { "bash", "-c", untarCommand.toString() };
     ShellCommandExecutor shexec = new ShellCommandExecutor(shellCmd);
@@ -748,7 +962,7 @@ public class FileUtil {
     }
   }
 
-  private static void unTarUsingJava(File inFile, File untarDir,
+  static void unTarUsingJava(File inFile, File untarDir,
       boolean gzipped) throws IOException {
     InputStream inputStream = null;
     TarArchiveInputStream tis = null;
@@ -771,6 +985,29 @@ public class FileUtil {
     }
   }
 
 
+  private static void unTarUsingJava(InputStream inputStream, File untarDir,
+                                     boolean gzipped) throws IOException {
+    TarArchiveInputStream tis = null;
+    try {
+      if (gzipped) {
+        inputStream = new BufferedInputStream(new GZIPInputStream(
+            inputStream));
+      } else {
+        inputStream =
+            new BufferedInputStream(inputStream);
+      }
+
+      tis = new TarArchiveInputStream(inputStream);
+
+      for (TarArchiveEntry entry = tis.getNextTarEntry(); entry != null;) {
+        unpackEntries(tis, entry, untarDir);
+        entry = tis.getNextTarEntry();
+      }
+    } finally {
+      IOUtils.cleanupWithLogger(LOG, tis, inputStream);
+    }
+  }
+
   private static void unpackEntries(TarArchiveInputStream tis,
       TarArchiveEntry entry, File outputDir) throws IOException {
     if (entry.isDirectory()) {
@@ -787,6 +1024,14 @@ public class FileUtil {
       return;
     }
 
 
+    if (entry.isSymbolicLink()) {
+      // Create symbolic link relative to tar parent dir
+      Files.createSymbolicLink(FileSystems.getDefault()
+              .getPath(outputDir.getPath(), entry.getName()),
+          FileSystems.getDefault().getPath(entry.getLinkName()));
+      return;
+    }
+
     File outputFile = new File(outputDir, entry.getName());
     if (!outputFile.getParentFile().exists()) {
       if (!outputFile.getParentFile().mkdirs()) {
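A short usage sketch for the stream-based unTar() added in this file (a sketch only: archive name and destination directory are example values; the caller owns and closes the stream):

  import java.io.File;
  import java.io.FileInputStream;
  import java.io.InputStream;
  import org.apache.hadoop.fs.FileUtil;

  public class UnTarExample {
    public static void main(String[] args) throws Exception {
      File untarDir = new File("dest");
      // The caller owns the stream; unTar reads it to completion.
      try (InputStream in = new FileInputStream("archive.tar.gz")) {
        // gzipped=true selects "gzip -dc | tar -x" on Unix and the
        // Java-based TarArchiveInputStream path on Windows.
        FileUtil.unTar(in, untarDir, true);
      }
    }
  }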

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -139,7 +139,7 @@ public class LocalFileSystem extends ChecksumFileSystem {
           LOG.warn("Ignoring failure of renameTo");
           LOG.warn("Ignoring failure of renameTo");
         }
         }
     } catch (IOException e) {
     } catch (IOException e) {
-      LOG.warn("Error moving bad file " + p + ": " + e);
+      LOG.warn("Error moving bad file " + p, e);
     }
     }
     return false;
     return false;
   }
   }

+ 12 - 18
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -888,9 +888,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       Stat oldBreadcrumbStat = fenceOldActive();
       writeBreadCrumbNode(oldBreadcrumbStat);
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Becoming active for " + this);
-      }
+      LOG.debug("Becoming active for {}", this);
+
       appClient.becomeActive();
       state = State.ACTIVE;
       return true;
@@ -910,8 +909,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       throws KeeperException, InterruptedException {
     Preconditions.checkState(appData != null, "no appdata");
     
-    LOG.info("Writing znode " + zkBreadCrumbPath +
-        " to indicate that the local node is the most recent active...");
+    LOG.info("Writing znode {} to indicate that the local " +
+        "node is the most recent active...", zkBreadCrumbPath);
     if (oldBreadcrumbStat == null) {
       // No previous active, just create the node
       createWithRetries(zkBreadCrumbPath, appData, zkAcl,
@@ -948,9 +947,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       
       
       deleteWithRetries(zkBreadCrumbPath, stat.getVersion());
     } catch (Exception e) {
-      LOG.warn("Unable to delete our own bread-crumb of being active at " +
-          zkBreadCrumbPath + ": " + e.getLocalizedMessage() + ". " +
-          "Expecting to be fenced by the next active.");
+      LOG.warn("Unable to delete our own bread-crumb of being active at {}. " +
+          "Expecting to be fenced by the next active.", zkBreadCrumbPath, e);
     }
   }
 
 
@@ -984,7 +982,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       throw ke;
     }
 
-    LOG.info("Old node exists: " + StringUtils.byteToHexString(data));
+    LOG.info("Old node exists: {}", StringUtils.byteToHexString(data));
     if (Arrays.equals(data, appData)) {
       LOG.info("But old node has our own data, so don't need to fence it.");
     } else {
@@ -995,9 +993,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
 
   private void becomeStandby() {
     if (state != State.STANDBY) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Becoming standby for " + this);
-      }
+      LOG.debug("Becoming standby for {}", this);
       state = State.STANDBY;
       appClient.becomeStandby();
     }
@@ -1005,9 +1001,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
 
 
   private void enterNeutralMode() {
     if (state != State.NEUTRAL) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Entering neutral mode for " + this);
-      }
+      LOG.debug("Entering neutral mode for {}", this);
       state = State.NEUTRAL;
       appClient.enterNeutralMode();
     }
@@ -1124,7 +1118,7 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
   private synchronized boolean isStaleClient(Object ctx) {
     Preconditions.checkNotNull(ctx);
     if (zkClient != (ZooKeeper)ctx) {
-      LOG.warn("Ignoring stale result from old client with sessionId " +
+      LOG.warn("Ignoring stale result from old client with sessionId {}",
           String.format("0x%08x", ((ZooKeeper)ctx).getSessionId()));
       return true;
     }
@@ -1162,8 +1156,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
         throws KeeperException, IOException {
       try {
         if (!hasReceivedEvent.await(connectionTimeoutMs, TimeUnit.MILLISECONDS)) {
-          LOG.error("Connection timed out: couldn't connect to ZooKeeper in "
-              + connectionTimeoutMs + " milliseconds");
+          LOG.error("Connection timed out: couldn't connect to ZooKeeper in " +
+              "{} milliseconds", connectionTimeoutMs);
           zk.close();
           throw KeeperException.create(Code.CONNECTIONLOSS);
         }

+ 10 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java

@@ -123,7 +123,7 @@ public class FailoverController {
       toSvcStatus = toSvc.getServiceStatus();
     } catch (IOException e) {
       String msg = "Unable to get service state for " + target;
-      LOG.error(msg + ": " + e.getLocalizedMessage());
+      LOG.error(msg, e);
       throw new FailoverFailedException(msg, e);
     }
 
 
@@ -139,7 +139,7 @@ public class FailoverController {
             target + " is not ready to become active: " +
             target + " is not ready to become active: " +
             notReadyReason);
             notReadyReason);
       } else {
       } else {
-        LOG.warn("Service is not ready to become active, but forcing: " +
+        LOG.warn("Service is not ready to become active, but forcing: {}",
             notReadyReason);
             notReadyReason);
       }
       }
     }
     }
@@ -172,11 +172,11 @@ public class FailoverController {
       proxy.transitionToStandby(createReqInfo());
       return true;
     } catch (ServiceFailedException sfe) {
-      LOG.warn("Unable to gracefully make " + svc + " standby (" +
-          sfe.getMessage() + ")");
+      LOG.warn("Unable to gracefully make {} standby ({})",
+          svc, sfe.getMessage());
     } catch (IOException ioe) {
-      LOG.warn("Unable to gracefully make " + svc +
-          " standby (unable to connect)", ioe);
+      LOG.warn("Unable to gracefully make {} standby (unable to connect)",
+          svc, ioe);
     } finally {
       if (proxy != null) {
         RPC.stopProxy(proxy);
@@ -227,13 +227,13 @@ public class FailoverController {
           toSvc.getProxy(conf, rpcTimeoutToNewActive),
           createReqInfo());
     } catch (ServiceFailedException sfe) {
-      LOG.error("Unable to make " + toSvc + " active (" +
-          sfe.getMessage() + "). Failing back.");
+      LOG.error("Unable to make {} active ({}). Failing back.",
+          toSvc, sfe.getMessage());
       failed = true;
       cause = sfe;
     } catch (IOException ioe) {
-      LOG.error("Unable to make " + toSvc +
-          " active (unable to connect). Failing back.", ioe);
+      LOG.error("Unable to make {} active (unable to connect). Failing back.",
+          toSvc, ioe);
       failed = true;
       cause = ioe;
     }

+ 4 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java

@@ -204,12 +204,11 @@ public class HealthMonitor {
         healthy = true;
       } catch (Throwable t) {
         if (isHealthCheckFailedException(t)) {
-          LOG.warn("Service health check failed for " + targetToMonitor
-              + ": " + t.getMessage());
+          LOG.warn("Service health check failed for {}", targetToMonitor, t);
           enterState(State.SERVICE_UNHEALTHY);
         } else {
-          LOG.warn("Transport-level exception trying to monitor health of " +
-              targetToMonitor + ": " + t.getCause() + " " + t.getLocalizedMessage());
+          LOG.warn("Transport-level exception trying to monitor health of {}",
+              targetToMonitor, t);
           RPC.stopProxy(proxy);
           proxy = null;
           enterState(State.SERVICE_NOT_RESPONDING);
@@ -246,7 +245,7 @@ public class HealthMonitor {
 
 
   private synchronized void enterState(State newState) {
     if (newState != state) {
-      LOG.info("Entering state " + newState);
+      LOG.info("Entering state {}", newState);
       state = newState;
       synchronized (callbacks) {
         for (Callback cb : callbacks) {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

@@ -136,7 +136,7 @@ public final class HttpServer2 implements FilterContainer {
   public static final String HTTP_MAX_THREADS_KEY = "hadoop.http.max.threads";
   public static final String HTTP_TEMP_DIR_KEY = "hadoop.http.temp.dir";
 
-  static final String FILTER_INITIALIZER_PROPERTY
+  public static final String FILTER_INITIALIZER_PROPERTY
       = "hadoop.http.filter.initializers";
 
   // The ServletContext attribute where the daemon Configuration
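With the constant now public, callers can reference it instead of hard-coding the property name. A hedged sketch (the filter initializer class name is a placeholder):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.http.HttpServer2;

  public class FilterInitializerConfigExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      // Equivalent to setting "hadoop.http.filter.initializers" directly.
      conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
          "org.example.MyFilterInitializer");
      System.out.println(conf.get(HttpServer2.FILTER_INITIALIZER_PROPERTY));
    }
  }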

+ 4 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryUtils.java

@@ -72,9 +72,7 @@ public class RetryUtils {
             retryPolicySpecKey, defaultRetryPolicySpec
             );
     
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("multipleLinearRandomRetry = " + multipleLinearRandomRetry);
-    }
+    LOG.debug("multipleLinearRandomRetry = {}", multipleLinearRandomRetry);
 
     if (multipleLinearRandomRetry == null) {
       //no retry
@@ -124,10 +122,9 @@ public class RetryUtils {
         p = RetryPolicies.TRY_ONCE_THEN_FAIL;
       }
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("RETRY " + retries + ") policy="
-            + p.getClass().getSimpleName() + ", exception=" + e);
-      }
+      LOG.debug("RETRY {}) policy={}", retries,
+            p.getClass().getSimpleName(), e);
+
       return p.shouldRetry(e, retries, failovers, isMethodIdempotent);
     }
 
 

+ 19 - 20
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java

@@ -179,8 +179,7 @@ public class DNS {
         netIf = getSubinterface(strInterface);
       }
     } catch (SocketException e) {
-      LOG.warn("I/O error finding interface " + strInterface +
-          ": " + e.getMessage());
+      LOG.warn("I/O error finding interface {}", strInterface, e);
       return new String[] { cachedHostAddress };
     }
     if (netIf == null) {
@@ -265,7 +264,7 @@ public class DNS {
     }
 
     if (hosts.isEmpty()) {
-      LOG.warn("Unable to determine hostname for interface " +
+      LOG.warn("Unable to determine hostname for interface {}",
           strInterface);
       hosts.add(cachedHostname);
     }
@@ -283,8 +282,8 @@ public class DNS {
     try {
       localhost = InetAddress.getLocalHost().getCanonicalHostName();
     } catch (UnknownHostException e) {
-      LOG.warn("Unable to determine local hostname "
-          + "-falling back to \"" + LOCALHOST + "\"", e);
+      LOG.warn("Unable to determine local hostname -falling back to '{}'",
+          LOCALHOST, e);
       localhost = LOCALHOST;
     }
     return localhost;
@@ -303,21 +302,21 @@ public class DNS {
    */
   private static String resolveLocalHostIPAddress() {
     String address;
+    try {
+      address = InetAddress.getLocalHost().getHostAddress();
+    } catch (UnknownHostException e) {
+      LOG.warn("Unable to determine address of the host " +
+          "-falling back to '{}' address", LOCALHOST, e);
       try {
-        address = InetAddress.getLocalHost().getHostAddress();
-      } catch (UnknownHostException e) {
-        LOG.warn("Unable to determine address of the host"
-                + "-falling back to \"" + LOCALHOST + "\" address", e);
-        try {
-          address = InetAddress.getByName(LOCALHOST).getHostAddress();
-        } catch (UnknownHostException noLocalHostAddressException) {
-          //at this point, deep trouble
-          LOG.error("Unable to determine local loopback address "
-                  + "of \"" + LOCALHOST + "\" " +
-                  "-this system's network configuration is unsupported", e);
-          address = null;
-        }
+        address = InetAddress.getByName(LOCALHOST).getHostAddress();
+      } catch (UnknownHostException noLocalHostAddressException) {
+        //at this point, deep trouble
+        LOG.error("Unable to determine local loopback address of '{}' " +
+            "-this system's network configuration is unsupported",
+            LOCALHOST, e);
+        address = null;
       }
+    }
     return address;
   }
 
 
@@ -431,8 +430,8 @@ public class DNS {
         netIf = getSubinterface(strInterface);
       }
     } catch (SocketException e) {
-      LOG.warn("I/O error finding interface " + strInterface +
-          ": " + e.getMessage());
+      LOG.warn("I/O error finding interface {}: {}",
+          strInterface, e.getMessage());
       return Arrays.asList(InetAddress.getByName(cachedHostAddress));
     }
     if (netIf == null) {

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

@@ -22,6 +22,7 @@ import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
 import java.security.MessageDigest;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
@@ -627,6 +628,11 @@ extends AbstractDelegationTokenIdentifier>
       }
     }
     // don't hold lock on 'this' to avoid edit log updates blocking token ops
+    logExpireTokens(expiredTokens);
+  }
+
+  protected void logExpireTokens(
+      Collection<TokenIdent> expiredTokens) throws IOException {
     for (TokenIdent ident : expiredTokens) {
       logExpireToken(ident);
       LOG.info("Removing expired token " + formatTokenId(ident));

+ 8 - 19
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/AbstractService.java

@@ -194,9 +194,7 @@ public abstract class AbstractService implements Service {
           serviceStart();
           if (isInState(STATE.STARTED)) {
             //if the service started (and isn't now in a later state), notify
-            if (LOG.isDebugEnabled()) {
-              LOG.debug("Service " + getName() + " is started");
-            }
+            LOG.debug("Service {} is started", getName());
             notifyListeners();
           }
         } catch (Exception e) {
@@ -235,9 +233,7 @@ public abstract class AbstractService implements Service {
         }
       } else {
         //already stopped: note it
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Ignoring re-entrant call to stop()");
-        }
+        LOG.debug("Ignoring re-entrant call to stop()");
       }
     }
   }
@@ -258,9 +254,7 @@ public abstract class AbstractService implements Service {
    * @param exception the exception
    */
   protected final void noteFailure(Exception exception) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("noteFailure " + exception, (Throwable) null);
-    }
+    LOG.debug("noteFailure {}", exception);
     if (exception == null) {
       //make sure failure logic doesn't itself cause problems
       return;
@@ -270,10 +264,8 @@ public abstract class AbstractService implements Service {
       if (failureCause == null) {
         failureCause = exception;
         failureState = getServiceState();
-        LOG.info("Service " + getName()
-                 + " failed in state " + failureState
-                 + "; cause: " + exception,
-                 exception);
+        LOG.info("Service {} failed in state {}",
+            getName(), failureState, exception);
       }
     }
   }
@@ -418,8 +410,7 @@ public abstract class AbstractService implements Service {
       listeners.notifyListeners(this);
       globalListeners.notifyListeners(this);
     } catch (Throwable e) {
-      LOG.warn("Exception while notifying listeners of " + this + ": " + e,
-               e);
+      LOG.warn("Exception while notifying listeners of {}", this, e);
     }
   }
 
 
@@ -449,10 +440,8 @@ public abstract class AbstractService implements Service {
     assert stateModel != null : "null state in " + name + " " + this.getClass();
     STATE oldState = stateModel.enterState(newState);
     if (oldState != newState) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug(
-          "Service: " + getName() + " entered state " + getServiceState());
-      }
+      LOG.debug("Service: {} entered state {}", getName(), getServiceState());
+
       recordLifecycleEvent();
     }
     return oldState;
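The hunks in this file, like those in the HA classes above, all apply the same SLF4J conversion: drop the isDebugEnabled() guard, use {} placeholders, and pass a Throwable as the trailing argument when a stack trace is wanted. A condensed sketch of the pattern (class name and messages are illustrative):

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class ParameterizedLoggingExample {
    private static final Logger LOG =
        LoggerFactory.getLogger(ParameterizedLoggingExample.class);

    void demo(String serviceName, Exception cause) {
      // The {} placeholder defers message construction until the level is
      // enabled, so no explicit isDebugEnabled() guard is needed.
      LOG.debug("Service {} is started", serviceName);

      // A Throwable passed as the last argument (with no matching placeholder)
      // is logged together with its stack trace.
      LOG.warn("Exception while notifying listeners of {}", serviceName, cause);
    }
  }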

+ 2 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java

@@ -81,9 +81,7 @@ public final class ServiceOperations {
     try {
       stop(service);
     } catch (Exception e) {
-      log.warn("When stopping the service " + service.getName()
-               + " : " + e,
-               e);
+      log.warn("When stopping the service " + service.getName(), e);
       return e;
     }
     return null;
@@ -103,7 +101,7 @@ public final class ServiceOperations {
     try {
       stop(service);
     } catch (Exception e) {
-      log.warn("When stopping the service {} : {}", service.getName(), e, e);
+      log.warn("When stopping the service {}", service.getName(), e);
       return e;
     }
     return null;

+ 59 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CombinedIPList.java

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Util class to store IPs/hosts/subnets.
+ */
+public class CombinedIPList implements IPList {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(CombinedIPList.class);
+
+  private final IPList[] networkLists;
+
+  public CombinedIPList(String fixedBlackListFile,
+      String variableBlackListFile, long cacheExpiryInSeconds) {
+
+    IPList fixedNetworkList = new FileBasedIPList(fixedBlackListFile);
+    if (variableBlackListFile != null) {
+      IPList variableNetworkList = new CacheableIPList(
+          new FileBasedIPList(variableBlackListFile), cacheExpiryInSeconds);
+      networkLists = new IPList[]{fixedNetworkList, variableNetworkList};
+    } else {
+      networkLists = new IPList[]{fixedNetworkList};
+    }
+  }
+
+  @Override
+  public boolean isIn(String ipAddress) {
+    if (ipAddress == null) {
+      throw new IllegalArgumentException("ipAddress is null");
+    }
+
+    for (IPList networkList : networkLists) {
+      if (networkList.isIn(ipAddress)) {
+        return true;
+      }
+    }
+    return false;
+  }
+}
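A usage sketch for the new CombinedIPList (file names and the cache expiry are example values; FileBasedIPList and CacheableIPList are the existing helpers it builds on):

  import org.apache.hadoop.util.CombinedIPList;
  import org.apache.hadoop.util.IPList;

  public class CombinedIPListExample {
    public static void main(String[] args) {
      // Fixed blacklist plus a variable list refreshed every 100 seconds.
      IPList blackList = new CombinedIPList(
          "fixedBlackList.txt", "variableBlackList.txt", 100);
      System.out.println(blackList.isIn("10.119.103.112"));
    }
  }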

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java

@@ -83,11 +83,11 @@ import org.slf4j.LoggerFactory;
  *
  * <p>Examples:</p>
  * <p><blockquote><pre>
- * $ bin/hadoop dfs -fs darwin:9820 -ls /data
- * list /data directory in dfs with namenode darwin:9820
+ * $ bin/hadoop dfs -fs darwin:8020 -ls /data
+ * list /data directory in dfs with namenode darwin:8020
  * 
- * $ bin/hadoop dfs -D fs.default.name=darwin:9820 -ls /data
- * list /data directory in dfs with namenode darwin:9820
+ * $ bin/hadoop dfs -D fs.default.name=darwin:8020 -ls /data
+ * list /data directory in dfs with namenode darwin:8020
  *     
  * $ bin/hadoop dfs -conf core-site.xml -conf hdfs-site.xml -ls /data
  * list /data directory in dfs with multiple conf files specified.

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java

@@ -89,6 +89,14 @@ public class JsonSerialization<T> {
     return classType.getSimpleName();
   }
 
+  /**
+   * Get the mapper of this class.
+   * @return the mapper
+   */
+  public ObjectMapper getMapper() {
+    return mapper;
+  }
+
   /**
    * Convert from JSON.
    *
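A hedged sketch of how the new getMapper() accessor might be used for ad-hoc serialization; it assumes JsonSerialization's existing (type, failOnUnknownProperties, pretty) constructor, and KeyVal is a placeholder bean:

  import com.fasterxml.jackson.databind.ObjectMapper;
  import org.apache.hadoop.util.JsonSerialization;

  public class JsonMapperExample {
    // Placeholder bean used only for this illustration.
    public static class KeyVal {
      public String key;
      public String value;
    }

    public static void main(String[] args) throws Exception {
      // Assumes the existing (type, failOnUnknownProperties, pretty) constructor.
      JsonSerialization<KeyVal> serDeser =
          new JsonSerialization<>(KeyVal.class, false, true);
      ObjectMapper mapper = serDeser.getMapper();
      KeyVal kv = new KeyVal();
      kv.key = "owner";
      kv.value = "hadoop";
      System.out.println(mapper.writeValueAsString(kv));
    }
  }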

+ 69 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/RunJar.java

@@ -34,13 +34,16 @@ import java.util.Enumeration;
 import java.util.List;
 import java.util.jar.JarEntry;
 import java.util.jar.JarFile;
+import java.util.jar.JarInputStream;
 import java.util.jar.Manifest;
 import java.util.regex.Pattern;
 
+import org.apache.commons.io.input.TeeInputStream;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.IOUtils.NullOutputStream;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 
@@ -94,6 +97,72 @@ public class RunJar {
     unJar(jarFile, toDir, MATCH_ANY);
   }
 
 
+  /**
+   * Unpack matching files from a jar. Entries inside the jar that do
+   * not match the given pattern will be skipped.
+   *
+   * @param inputStream the jar stream to unpack
+   * @param toDir the destination directory into which to unpack the jar
+   * @param unpackRegex the pattern to match jar entries against
+   *
+   * @throws IOException if an I/O error has occurred or toDir
+   * cannot be created and does not already exist
+   */
+  public static void unJar(InputStream inputStream, File toDir,
+                           Pattern unpackRegex)
+      throws IOException {
+    try (JarInputStream jar = new JarInputStream(inputStream)) {
+      int numOfFailedLastModifiedSet = 0;
+      for (JarEntry entry = jar.getNextJarEntry();
+           entry != null;
+           entry = jar.getNextJarEntry()) {
+        if (!entry.isDirectory() &&
+            unpackRegex.matcher(entry.getName()).matches()) {
+          File file = new File(toDir, entry.getName());
+          ensureDirectory(file.getParentFile());
+          try (OutputStream out = new FileOutputStream(file)) {
+            IOUtils.copyBytes(jar, out, BUFFER_SIZE);
+          }
+          if (!file.setLastModified(entry.getTime())) {
+            numOfFailedLastModifiedSet++;
+          }
+        }
+      }
+      if (numOfFailedLastModifiedSet > 0) {
+        LOG.warn("Could not set last modfied time for {} file(s)",
+            numOfFailedLastModifiedSet);
+      }
+      // ZipInputStream does not need the end of the file. Let's read it out.
+      // This helps with an additional TeeInputStream on the input.
+      IOUtils.copyBytes(inputStream, new NullOutputStream(), BUFFER_SIZE);
+    }
+  }
+
+  /**
+   * Unpack matching files from a jar. Entries inside the jar that do
+   * not match the given pattern will be skipped. Keep also a copy
+   * of the entire jar in the same directory for backward compatibility.
+   * TODO remove this feature in a new release and do only unJar
+   *
+   * @param inputStream the jar stream to unpack
+   * @param toDir the destination directory into which to unpack the jar
+   * @param unpackRegex the pattern to match jar entries against
+   *
+   * @throws IOException if an I/O error has occurred or toDir
+   * cannot be created and does not already exist
+   */
+  @Deprecated
+  public static void unJarAndSave(InputStream inputStream, File toDir,
+                           String name, Pattern unpackRegex)
+      throws IOException{
+    File file = new File(toDir, name);
+    ensureDirectory(toDir);
+    try (OutputStream jar = new FileOutputStream(file);
+         TeeInputStream teeInputStream = new TeeInputStream(inputStream, jar)) {
+      unJar(teeInputStream, toDir, unpackRegex);
+    }
+  }
+
   /**
    * Unpack matching files from a jar. Entries inside the jar that do
    * not match the given pattern will be skipped.
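A usage sketch for the stream-based unJar() added above (file names are example values; MATCH_ANY is the existing match-everything pattern in RunJar):

  import java.io.File;
  import java.io.FileInputStream;
  import java.io.InputStream;
  import org.apache.hadoop.util.RunJar;

  public class UnJarStreamExample {
    public static void main(String[] args) throws Exception {
      File toDir = new File("unpacked");
      try (InputStream in = new FileInputStream("app.jar")) {
        // Unpack every entry; the stream is read through to EOF so that a
        // wrapping TeeInputStream (as in unJarAndSave) sees the whole jar.
        RunJar.unJar(in, toDir, RunJar.MATCH_ANY);
      }
    }
  }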

+ 7 - 6
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -977,20 +977,21 @@
 </property>
 
 <property>
-  <name>fs.s3a.assumed.role.session.duration</name>
-  <value>30m</value>
+  <name>fs.s3a.assumed.role.policy</name>
+  <value/>
   <description>
-    Duration of assumed roles before a refresh is attempted.
+    JSON policy to apply to the role.
     Only used if AssumedRoleCredentialProvider is the AWS credential provider.
   </description>
 </property>
 
 <property>
-  <name>fs.s3a.assumed.role.policy</name>
-  <value/>
+  <name>fs.s3a.assumed.role.session.duration</name>
+  <value>30m</value>
   <description>
-    JSON policy containing more restrictions to apply to the role.
+    Duration of assumed roles before a refresh is attempted.
     Only used if AssumedRoleCredentialProvider is the AWS credential provider.
+    Range: 15m to 1h
   </description>
 </property>
 
 

+ 6 - 0
hadoop-common-project/hadoop-common/src/site/markdown/CommandsManual.md

@@ -187,6 +187,12 @@ user name.
 
 
 Example: `hadoop kerbname user@EXAMPLE.COM`
 
+### `kdiag`
+
+Usage: `hadoop kdiag`
+
+Diagnose Kerberos Problems
+
 ### `key`
 
 Usage: `hadoop key <subcommand> [options]`

+ 12 - 20
hadoop-common-project/hadoop-common/src/site/markdown/SecureMode.md

@@ -435,17 +435,12 @@ or a specific principal in a named keytab.
 The output of the command can be used for local diagnostics, or forwarded to
 whoever supports the cluster.
 
-The `KDiag` command has its own entry point; it is currently not hooked up
-to the end-user CLI.
-
-It is invoked simply by passing its full classname to one of the `bin/hadoop`,
-`bin/hdfs` or `bin/yarn` commands. Accordingly, it will display the kerberos client
-state of the command used to invoke it.
+The `KDiag` command has its own entry point; it is invoked by passing `kdiag` to
+the `bin/hadoop` command. Accordingly, it will display the kerberos client state
+of the command used to invoke it.
 
 ```
-hadoop org.apache.hadoop.security.KDiag
-hdfs org.apache.hadoop.security.KDiag
-yarn org.apache.hadoop.security.KDiag
+hadoop kdiag
 ```
 
 The command returns a status code of 0 for a successful diagnostics run.
@@ -525,7 +520,7 @@ some basic Kerberos preconditions.
 #### `--out outfile`: Write output to file.
 
 ```
-hadoop org.apache.hadoop.security.KDiag --out out.txt
+hadoop kdiag --out out.txt
 ```
 
 Much of the diagnostics information comes from the JRE (to `stderr`) and
@@ -534,7 +529,7 @@ To get all the output, it is best to redirect both these output streams
 to the same file, and omit the `--out` option.
 
 ```
-hadoop org.apache.hadoop.security.KDiag --keytab zk.service.keytab --principal zookeeper/devix.example.org@REALM > out.txt 2>&1
+hadoop kdiag --keytab zk.service.keytab --principal zookeeper/devix.example.org@REALM > out.txt 2>&1
 ```
 
 Even there, the output of the two streams, emitted across multiple threads, can
@@ -543,15 +538,12 @@ name in the Log4j output to distinguish background threads from the main thread
 helps at the hadoop level, but doesn't assist in JVM-level logging.
 
 #### `--resource <resource>` : XML configuration resource to load.
-
-When using the `hdfs` and `yarn` commands, it is often useful to force
-load the `hdfs-site.xml` and `yarn-site.xml` resource files, to pick up any Kerberos-related
-configuration options therein.
-The `core-default` and `core-site` XML resources are always loaded.
+This option can be used to load additional XML configuration files. By default,
+only the `core-default` and `core-site` XML resources are loaded. This helps
+when the additional configuration files contain any Kerberos-related settings.
 
 ```
-hdfs org.apache.hadoop.security.KDiag --resource hbase-default.xml --resource hbase-site.xml
-yarn org.apache.hadoop.security.KDiag --resource yarn-default.xml --resource yarn-site.xml
+hadoop kdiag --resource hbase-default.xml --resource hbase-site.xml
 ```
 
 For extra logging during the operation, set the logging and `HADOOP_JAAS_DEBUG`
@@ -580,9 +572,9 @@ nor `"/"` characters.
 ### Example
 
 ```
-hdfs org.apache.hadoop.security.KDiag \
+hadoop kdiag \
   --nofail \
-  --resource hbase-default.xml --resource hbase-site.xml \
+  --resource hdfs-site.xml --resource yarn-site.xml \
   --keylen 1024 \
   --keytab zk.service.keytab --principal zookeeper/devix.example.org@REALM
 ```

+ 76 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java

@@ -48,7 +48,15 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.junit.After;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+
+import static org.apache.hadoop.conf.StorageUnit.BYTES;
+import static org.apache.hadoop.conf.StorageUnit.GB;
+import static org.apache.hadoop.conf.StorageUnit.KB;
+import static org.apache.hadoop.conf.StorageUnit.MB;
+import static org.apache.hadoop.conf.StorageUnit.TB;
+import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.*;
 import static org.junit.Assert.assertArrayEquals;
 
 
@@ -68,10 +76,13 @@ import org.apache.log4j.AppenderSkeleton;
 import org.apache.log4j.Logger;
 import org.apache.log4j.Logger;
 import org.apache.log4j.spi.LoggingEvent;
 import org.apache.log4j.spi.LoggingEvent;
 import org.hamcrest.CoreMatchers;
 import org.hamcrest.CoreMatchers;
+import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
 import org.mockito.Mockito;
 
 
 public class TestConfiguration {
 public class TestConfiguration {
 
 
+  @Rule
+  public ExpectedException thrown = ExpectedException.none();
   private static final double DOUBLE_DELTA = 0.000000001f;
   private static final double DOUBLE_DELTA = 0.000000001f;
   private Configuration conf;
   private Configuration conf;
   final static String CONFIG = new File("./test-config-TestConfiguration.xml").getAbsolutePath();
   final static String CONFIG = new File("./test-config-TestConfiguration.xml").getAbsolutePath();
@@ -1325,6 +1336,71 @@ public class TestConfiguration {
     }
     }
   }
   }
 
 
+  @Test
+  public void testStorageUnit() {
+    final String key = "valid.key";
+    final String nonKey = "not.a.key";
+    Configuration conf = new Configuration(false);
+
+    conf.setStorageSize(key, 10, MB);
+    // This call returns the value specified in the Key as a double in MBs.
+    assertThat(conf.getStorageSize(key, "1GB", MB),
+        is(10.0));
+
+    // Since this key is missing, this call converts the default value of 1GB
+    // to MBs and returns that value.
+    assertThat(conf.getStorageSize(nonKey, "1GB", MB),
+        is(1024.0));
+
+
+    conf.setStorageSize(key, 1024, BYTES);
+    assertThat(conf.getStorageSize(key, 100, KB), is(1.0));
+
+    assertThat(conf.getStorageSize(nonKey, 100.0, KB), is(100.0));
+
+    // We try out different kinds of string formats to see if they work, and
+    // during the read we also try using different storage units.
+    conf.setStrings(key, "1TB");
+    assertThat(conf.getStorageSize(key, "1PB", GB), is(1024.0));
+
+    conf.setStrings(key, "1bytes");
+    assertThat(conf.getStorageSize(key, "1PB", KB), is(0.001));
+
+    conf.setStrings(key, "2048b");
+    assertThat(conf.getStorageSize(key, "1PB", KB), is(2.0));
+
+    conf.setStrings(key, "64 GB");
+    assertThat(conf.getStorageSize(key, "1PB", GB), is(64.0));
+
+    // Match the parsing patterns of getLongBytes, which takes single char
+    // suffix.
+    conf.setStrings(key, "1T");
+    assertThat(conf.getStorageSize(key, "1GB", TB), is(1.0));
+
+    conf.setStrings(key, "1k");
+    assertThat(conf.getStorageSize(key, "1GB", KB), is(1.0));
+
+    conf.setStrings(key, "10m");
+    assertThat(conf.getStorageSize(key, "1GB", MB), is(10.0));
+
+
+
+    // Missing format specification, this should throw.
+    conf.setStrings(key, "100");
+    thrown.expect(IllegalArgumentException.class);
+    conf.getStorageSize(key, "1PB", GB);
+
+    // illegal format specification, this should throw.
+    conf.setStrings(key, "1HB");
+    thrown.expect(IllegalArgumentException.class);
+    conf.getStorageSize(key, "1PB", GB);
+
+    // Illegal number  specification, this should throw.
+    conf.setStrings(key, "HadoopGB");
+    thrown.expect(IllegalArgumentException.class);
+    conf.getStorageSize(key, "1PB", GB);
+  }
+
   @Test
   @Test
   public void testTimeDurationWarning() {
   public void testTimeDurationWarning() {
     // check warn for possible loss of precision
     // check warn for possible loss of precision

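For readers unfamiliar with the new storage-size accessors exercised above, here is a minimal, self-contained sketch of how `Configuration.setStorageSize`/`getStorageSize` might be used. It is based only on the calls shown in `testStorageUnit`; the configuration key is illustrative, not a real Hadoop setting.

```java
import org.apache.hadoop.conf.Configuration;

import static org.apache.hadoop.conf.StorageUnit.GB;
import static org.apache.hadoop.conf.StorageUnit.MB;

public class StorageSizeExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);

    // "dfs.example.cache.size" is an illustrative key, not a real setting.
    conf.setStorageSize("dfs.example.cache.size", 10, MB);

    // Values come back as doubles in the requested unit; the default
    // ("1GB" here) is only used when the key is absent.
    double inMb = conf.getStorageSize("dfs.example.cache.size", "1GB", MB); // 10.0
    double inGb = conf.getStorageSize("dfs.example.cache.size", "1GB", GB); // ~0.0098

    // Human-readable strings such as "64 GB" or "1T" are accepted too,
    // mirroring the formats exercised in the test above.
    conf.set("dfs.example.cache.size", "64 GB");
    double cacheGb = conf.getStorageSize("dfs.example.cache.size", "1GB", GB); // 64.0

    System.out.println(inMb + " " + inGb + " " + cacheGb);
  }
}
```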
+ 277 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestStorageUnit.java

@@ -0,0 +1,277 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ *
+ */
+
+package org.apache.hadoop.conf;
+
+import org.junit.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+
+/**
+ * Tests that Storage Units work as expected.
+ */
+public class TestStorageUnit {
+  final static double KB = 1024.0;
+  final static double MB = KB * 1024.0;
+  final static double GB = MB * 1024.0;
+  final static double TB = GB * 1024.0;
+  final static double PB = TB * 1024.0;
+  final static double EB = PB * 1024.0;
+
+  @Test
+  public void testByteToKiloBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1024.0, 1.0);
+    results.put(2048.0, 2.0);
+    results.put(-1024.0, -1.0);
+    results.put(34565.0, 33.7549);
+    results.put(223344332.0, 218109.6992);
+    results.put(1234983.0, 1206.0381);
+    results.put(1234332.0, 1205.4023);
+    results.put(0.0, 0.0);
+
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toKBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testBytesToMegaBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1048576.0, 1.0);
+    results.put(24117248.0, 23.0);
+    results.put(459920023.0, 438.6139);
+    results.put(234443233.0, 223.5825);
+    results.put(-35651584.0, -34.0);
+    results.put(0.0, 0.0);
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toMBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testBytesToGigaBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1073741824.0, 1.0);
+    results.put(24696061952.0, 23.0);
+    results.put(459920023.0, 0.4283);
+    results.put(234443233.0, 0.2183);
+    results.put(-36507222016.0, -34.0);
+    results.put(0.0, 0.0);
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toGBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testBytesToTerraBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1.09951E+12, 1.0);
+    results.put(2.52888E+13, 23.0);
+    results.put(459920023.0, 0.0004);
+    results.put(234443233.0, 0.0002);
+    results.put(-3.73834E+13, -34.0);
+    results.put(0.0, 0.0);
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toTBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testBytesToPetaBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1.1259E+15, 1.0);
+    results.put(2.58957E+16, 23.0);
+    results.put(4.70958E+11, 0.0004);
+    results.put(234443233.0, 0.0000); // Out of precision window.
+    results.put(-3.82806E+16, -34.0);
+    results.put(0.0, 0.0);
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toPBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testBytesToExaBytes() {
+    Map<Double, Double> results = new HashMap<>();
+    results.put(1.15292E+18, 1.0);
+    results.put(2.65172E+19, 23.0);
+    results.put(4.82261E+14, 0.0004);
+    results.put(234443233.0, 0.0000); // Out of precision window.
+    results.put(-3.91993E+19, -34.0);
+    results.put(0.0, 0.0);
+    for (Map.Entry<Double, Double> entry : results.entrySet()) {
+      assertThat(StorageUnit.BYTES.toEBs(entry.getKey()), is(entry.getValue()));
+    }
+  }
+
+  @Test
+  public void testByteConversions() {
+    assertThat(StorageUnit.BYTES.getShortName(), is("b"));
+    assertThat(StorageUnit.BYTES.getSuffixChar(), is("b"));
+
+    assertThat(StorageUnit.BYTES.getLongName(), is("bytes"));
+    assertThat(StorageUnit.BYTES.toString(), is("bytes"));
+    assertThat(StorageUnit.BYTES.toBytes(1), is(1.0));
+    assertThat(StorageUnit.BYTES.toBytes(1024),
+        is(StorageUnit.BYTES.getDefault(1024)));
+    assertThat(StorageUnit.BYTES.fromBytes(10), is(10.0));
+  }
+
+  @Test
+  public void testKBConversions() {
+    assertThat(StorageUnit.KB.getShortName(), is("kb"));
+    assertThat(StorageUnit.KB.getSuffixChar(), is("k"));
+    assertThat(StorageUnit.KB.getLongName(), is("kilobytes"));
+    assertThat(StorageUnit.KB.toString(), is("kilobytes"));
+    assertThat(StorageUnit.KB.toKBs(1024),
+        is(StorageUnit.KB.getDefault(1024)));
+
+
+    assertThat(StorageUnit.KB.toBytes(1), is(KB));
+    assertThat(StorageUnit.KB.fromBytes(KB), is(1.0));
+
+    assertThat(StorageUnit.KB.toKBs(10), is(10.0));
+    assertThat(StorageUnit.KB.toMBs(3.0 * 1024.0), is(3.0));
+    assertThat(StorageUnit.KB.toGBs(1073741824), is(1024.0));
+    assertThat(StorageUnit.KB.toTBs(1073741824), is(1.0));
+    assertThat(StorageUnit.KB.toPBs(1.0995116e+12), is(1.0));
+    assertThat(StorageUnit.KB.toEBs(1.1258999e+15), is(1.0));
+  }
+
+  @Test
+  public void testMBConversions() {
+    assertThat(StorageUnit.MB.getShortName(), is("mb"));
+    assertThat(StorageUnit.MB.getSuffixChar(), is("m"));
+    assertThat(StorageUnit.MB.getLongName(), is("megabytes"));
+    assertThat(StorageUnit.MB.toString(), is("megabytes"));
+    assertThat(StorageUnit.MB.toMBs(1024),
+        is(StorageUnit.MB.getDefault(1024)));
+
+
+
+    assertThat(StorageUnit.MB.toBytes(1), is(MB));
+    assertThat(StorageUnit.MB.fromBytes(MB), is(1.0));
+
+    assertThat(StorageUnit.MB.toKBs(1), is(1024.0));
+    assertThat(StorageUnit.MB.toMBs(10), is(10.0));
+
+    assertThat(StorageUnit.MB.toGBs(44040192), is(43008.0));
+    assertThat(StorageUnit.MB.toTBs(1073741824), is(1024.0));
+    assertThat(StorageUnit.MB.toPBs(1073741824), is(1.0));
+    assertThat(StorageUnit.MB.toEBs(1 * (EB/MB)), is(1.0));
+  }
+
+  @Test
+  public void testGBConversions() {
+    assertThat(StorageUnit.GB.getShortName(), is("gb"));
+    assertThat(StorageUnit.GB.getSuffixChar(), is("g"));
+    assertThat(StorageUnit.GB.getLongName(), is("gigabytes"));
+    assertThat(StorageUnit.GB.toString(), is("gigabytes"));
+    assertThat(StorageUnit.GB.toGBs(1024),
+        is(StorageUnit.GB.getDefault(1024)));
+
+
+    assertThat(StorageUnit.GB.toBytes(1), is(GB));
+    assertThat(StorageUnit.GB.fromBytes(GB), is(1.0));
+
+    assertThat(StorageUnit.GB.toKBs(1), is(1024.0 * 1024));
+    assertThat(StorageUnit.GB.toMBs(10), is(10.0 * 1024));
+
+    assertThat(StorageUnit.GB.toGBs(44040192.0), is(44040192.0));
+    assertThat(StorageUnit.GB.toTBs(1073741824), is(1048576.0));
+    assertThat(StorageUnit.GB.toPBs(1.07375e+9), is(1024.0078));
+    assertThat(StorageUnit.GB.toEBs(1 * (EB/GB)), is(1.0));
+  }
+
+  @Test
+  public void testTBConversions() {
+    assertThat(StorageUnit.TB.getShortName(), is("tb"));
+    assertThat(StorageUnit.TB.getSuffixChar(), is("t"));
+    assertThat(StorageUnit.TB.getLongName(), is("terabytes"));
+    assertThat(StorageUnit.TB.toString(), is("terabytes"));
+    assertThat(StorageUnit.TB.toTBs(1024),
+        is(StorageUnit.TB.getDefault(1024)));
+
+    assertThat(StorageUnit.TB.toBytes(1), is(TB));
+    assertThat(StorageUnit.TB.fromBytes(TB), is(1.0));
+
+    assertThat(StorageUnit.TB.toKBs(1), is(1024.0 * 1024* 1024));
+    assertThat(StorageUnit.TB.toMBs(10), is(10.0 * 1024 * 1024));
+
+    assertThat(StorageUnit.TB.toGBs(44040192.0), is(45097156608.0));
+    assertThat(StorageUnit.TB.toTBs(1073741824.0), is(1073741824.0));
+    assertThat(StorageUnit.TB.toPBs(1024), is(1.0));
+    assertThat(StorageUnit.TB.toEBs(1 * (EB/TB)), is(1.0));
+  }
+
+  @Test
+  public void testPBConversions() {
+    assertThat(StorageUnit.PB.getShortName(), is("pb"));
+    assertThat(StorageUnit.PB.getSuffixChar(), is("p"));
+    assertThat(StorageUnit.PB.getLongName(), is("petabytes"));
+    assertThat(StorageUnit.PB.toString(), is("petabytes"));
+    assertThat(StorageUnit.PB.toPBs(1024),
+        is(StorageUnit.PB.getDefault(1024)));
+
+
+    assertThat(StorageUnit.PB.toBytes(1), is(PB));
+    assertThat(StorageUnit.PB.fromBytes(PB), is(1.0));
+
+    assertThat(StorageUnit.PB.toKBs(1), is(PB/KB));
+    assertThat(StorageUnit.PB.toMBs(10), is(10.0 * (PB / MB)));
+
+    assertThat(StorageUnit.PB.toGBs(44040192.0),
+        is(44040192.0 * PB/GB));
+    assertThat(StorageUnit.PB.toTBs(1073741824.0),
+        is(1073741824.0 * (PB/TB)));
+    assertThat(StorageUnit.PB.toPBs(1024.0), is(1024.0));
+    assertThat(StorageUnit.PB.toEBs(1024.0), is(1.0));
+  }
+
+
+  @Test
+  public void testEBConversions() {
+    assertThat(StorageUnit.EB.getShortName(), is("eb"));
+    assertThat(StorageUnit.EB.getSuffixChar(), is("e"));
+
+    assertThat(StorageUnit.EB.getLongName(), is("exabytes"));
+    assertThat(StorageUnit.EB.toString(), is("exabytes"));
+    assertThat(StorageUnit.EB.toEBs(1024),
+        is(StorageUnit.EB.getDefault(1024)));
+
+    assertThat(StorageUnit.EB.toBytes(1), is(EB));
+    assertThat(StorageUnit.EB.fromBytes(EB), is(1.0));
+
+    assertThat(StorageUnit.EB.toKBs(1), is(EB/KB));
+    assertThat(StorageUnit.EB.toMBs(10), is(10.0 * (EB / MB)));
+
+    assertThat(StorageUnit.EB.toGBs(44040192.0),
+        is(44040192.0 * EB/GB));
+    assertThat(StorageUnit.EB.toTBs(1073741824.0),
+        is(1073741824.0 * (EB/TB)));
+    assertThat(StorageUnit.EB.toPBs(1.0), is(1024.0));
+    assertThat(StorageUnit.EB.toEBs(42.0), is(42.0));
+  }
+
+
+}

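As a companion to the tests above, a minimal sketch of using the `StorageUnit` enum directly; the values are arbitrary, and the four-decimal rounding noted in the comments is inferred from the expected values in `TestStorageUnit`.

```java
import org.apache.hadoop.conf.StorageUnit;

public class StorageUnitExample {
  public static void main(String[] args) {
    // Each unit converts to and from bytes as a double; judging by the
    // expectations above, results are rounded to four decimal places.
    double bytes = StorageUnit.GB.toBytes(2);         // 2147483648.0
    double asMb = StorageUnit.GB.toMBs(2);            // 2048.0
    double backToGb = StorageUnit.BYTES.toGBs(bytes); // 2.0

    // The short name and suffix character ("gb" / "g" for gigabytes, and so
    // on) presumably drive the string suffixes accepted elsewhere.
    System.out.println(StorageUnit.GB.getShortName() + " "
        + StorageUnit.GB.getSuffixChar() + " "
        + bytes + " " + asMb + " " + backToGb);
  }
}
```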
+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegateToFileSystem.java

@@ -47,6 +47,6 @@ public class TestDelegateToFileSystem {
 
 
   @Test
   @Test
   public void testDefaultURIwithPort() throws Exception {
   public void testDefaultURIwithPort() throws Exception {
-    testDefaultUriInternal("hdfs://dummyhost:9820");
+    testDefaultUriInternal("hdfs://dummyhost:8020");
   }
   }
 }
 }

+ 86 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java

@@ -26,6 +26,7 @@ import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 import static org.mockito.Mockito.when;
 
 
+import java.io.BufferedInputStream;
 import java.io.File;
 import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.FileOutputStream;
@@ -37,6 +38,8 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URL;
 import java.net.UnknownHostException;
 import java.net.UnknownHostException;
+import java.nio.file.FileSystems;
+import java.nio.file.Files;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.Collections;
@@ -47,6 +50,9 @@ import java.util.jar.Manifest;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipOutputStream;
 import java.util.zip.ZipOutputStream;
 
 
+import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
+import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -1173,4 +1179,84 @@ public class TestFileUtil {
     assertEquals(FileUtil.compareFs(fs3,fs4),true);
     assertEquals(FileUtil.compareFs(fs3,fs4),true);
     assertEquals(FileUtil.compareFs(fs1,fs6),false);
     assertEquals(FileUtil.compareFs(fs1,fs6),false);
   }
   }
+
+  @Test(timeout = 8000)
+  public void testCreateSymbolicLinkUsingJava() throws IOException {
+    setupDirs();
+    final File simpleTar = new File(del, FILE);
+    OutputStream os = new FileOutputStream(simpleTar);
+    TarArchiveOutputStream tos = new TarArchiveOutputStream(os);
+    File untarFile = null;
+    try {
+      // Files to tar
+      final String tmpDir = "tmp/test";
+      File tmpDir1 = new File(tmpDir, "dir1/");
+      File tmpDir2 = new File(tmpDir, "dir2/");
+      // Create the directories if they do not already exist
+      tmpDir1.mkdirs();
+      tmpDir2.mkdirs();
+
+      java.nio.file.Path symLink = FileSystems
+          .getDefault().getPath(tmpDir1.getPath() + "/sl");
+
+      // Create Symbolic Link
+      Files.createSymbolicLink(symLink,
+          FileSystems.getDefault().getPath(tmpDir2.getPath())).toString();
+      assertTrue(Files.isSymbolicLink(symLink.toAbsolutePath()));
+      // put entries in tar file
+      putEntriesInTar(tos, tmpDir1.getParentFile());
+      tos.close();
+
+      untarFile = new File(tmpDir, "2");
+      // Untar using java
+      FileUtil.unTarUsingJava(simpleTar, untarFile, false);
+
+      // Check symbolic link and other directories are there in untar file
+      assertTrue(Files.exists(untarFile.toPath()));
+      assertTrue(Files.exists(FileSystems.getDefault().getPath(untarFile
+          .getPath(), tmpDir)));
+      assertTrue(Files.isSymbolicLink(FileSystems.getDefault().getPath(untarFile
+          .getPath().toString(), symLink.toString())));
+
+    } finally {
+      FileUtils.deleteDirectory(new File("tmp"));
+      tos.close();
+    }
+
+  }
+
+  private void putEntriesInTar(TarArchiveOutputStream tos, File f)
+      throws IOException {
+    if (Files.isSymbolicLink(f.toPath())) {
+      TarArchiveEntry tarEntry = new TarArchiveEntry(f.getPath(),
+          TarArchiveEntry.LF_SYMLINK);
+      tarEntry.setLinkName(Files.readSymbolicLink(f.toPath()).toString());
+      tos.putArchiveEntry(tarEntry);
+      tos.closeArchiveEntry();
+      return;
+    }
+
+    if (f.isDirectory()) {
+      tos.putArchiveEntry(new TarArchiveEntry(f));
+      tos.closeArchiveEntry();
+      for (File child : f.listFiles()) {
+        putEntriesInTar(tos, child);
+      }
+    }
+
+    if (f.isFile()) {
+      tos.putArchiveEntry(new TarArchiveEntry(f));
+      BufferedInputStream origin = new BufferedInputStream(
+          new FileInputStream(f));
+      int count;
+      byte[] data = new byte[2048];
+      while ((count = origin.read(data)) != -1) {
+        tos.write(data, 0, count);
+      }
+      tos.flush();
+      tos.closeArchiveEntry();
+      origin.close();
+    }
+  }
+
 }
 }

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestSshFenceByTcpPort.java

@@ -38,7 +38,7 @@ public class TestSshFenceByTcpPort {
   private static String TEST_FENCING_HOST = System.getProperty(
   private static String TEST_FENCING_HOST = System.getProperty(
       "test.TestSshFenceByTcpPort.host", "localhost");
       "test.TestSshFenceByTcpPort.host", "localhost");
   private static final String TEST_FENCING_PORT = System.getProperty(
   private static final String TEST_FENCING_PORT = System.getProperty(
-      "test.TestSshFenceByTcpPort.port", "9820");
+      "test.TestSshFenceByTcpPort.port", "8020");
   private static final String TEST_KEYFILE = System.getProperty(
   private static final String TEST_KEYFILE = System.getProperty(
       "test.TestSshFenceByTcpPort.key");
       "test.TestSshFenceByTcpPort.key");
 
 

+ 1 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestServiceOperations.java

@@ -57,8 +57,7 @@ public class TestServiceOperations {
     ServiceOperations.stopQuietly(logger, service);
     ServiceOperations.stopQuietly(logger, service);
 
 
     assertThat(logCapturer.getOutput(),
     assertThat(logCapturer.getOutput(),
-        containsString("When stopping the service " + service.getName()
-            + " : " + e));
+        containsString("When stopping the service " + service.getName()));
     verify(e, times(1)).printStackTrace(Mockito.any(PrintWriter.class));
     verify(e, times(1)).printStackTrace(Mockito.any(PrintWriter.class));
   }
   }
 
 

+ 38 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java

@@ -604,8 +604,44 @@ public final class LambdaTestUtils {
   public static <T> void assertOptionalUnset(String message,
   public static <T> void assertOptionalUnset(String message,
       Optional<T> actual) {
       Optional<T> actual) {
     Assert.assertNotNull(message, actual);
     Assert.assertNotNull(message, actual);
-    if (actual.isPresent()) {
-      Assert.fail("Expected empty option, got " + actual.get().toString());
+    actual.ifPresent(
+        t -> Assert.fail("Expected empty option, got " + t.toString()));
+  }
+
+  /**
+   * Invoke a callable; wrap all checked exceptions with an
+   * AssertionError.
+   * @param closure closure to execute
+   * @param <T> return type of closure
+   * @return the value of the closure
+   * @throws AssertionError if the operation raised an IOE or
+   * other checked exception.
+   */
+  public static <T> T eval(Callable<T> closure) {
+    try {
+      return closure.call();
+    } catch (RuntimeException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new AssertionError(e.toString(), e);
+    }
+  }
+
+  /**
+   * Invoke a callable; wrap all checked exceptions with an
+   * AssertionError.
+   * @param closure closure to execute
+   * @throws AssertionError if the operation raised an IOE or
+   * other checked exception.
+   */
+  public static void eval(VoidCallable closure) {
+    try {
+      closure.call();
+    } catch (RuntimeException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new AssertionError(e.toString(), e);
     }
     }
   }
   }
 
 

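A short usage sketch of the two `eval()` overloads added above, since the diff only shows their implementation and tests; the file paths are illustrative.

```java
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

import static org.apache.hadoop.test.LambdaTestUtils.eval;

public class EvalUsageExample {
  public static void main(String[] args) {
    Path dir = Paths.get("target", "eval-example"); // illustrative location
    Path file = dir.resolve("data.txt");

    // Void overload: the checked IOExceptions thrown by the NIO calls are
    // wrapped in an AssertionError, so no throws clause or try/catch is needed.
    eval(() -> {
      Files.createDirectories(dir);
      Files.write(file, "hello".getBytes(StandardCharsets.UTF_8));
    });

    // Value-returning overload: the closure's result is passed back.
    long sizeKb = eval(() -> Files.size(file) / 1024);
    System.out.println("size in KB: " + sizeKb);
  }
}
```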
+ 36 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/TestLambdaTestUtils.java

@@ -493,4 +493,40 @@ public class TestLambdaTestUtils extends Assert {
     assertMinRetryCount(0);
     assertMinRetryCount(0);
   }
   }
 
 
+  @Test
+  public void testEvalToSuccess() {
+    assertTrue("Eval to success", eval(() -> true));
+  }
+
+  /**
+   * There's no attempt to wrap an unchecked exception
+   * with an AssertionError.
+   */
+  @Test
+  public void testEvalDoesntWrapRTEs() throws Throwable {
+    intercept(RuntimeException.class, "",
+        () -> eval(() -> {
+          throw new RuntimeException("t");
+        }));
+  }
+
+  /**
+   * Verify that IOEs are caught and wrapped, and that the
+   * inner cause is the original IOE.
+   */
+  @Test
+  public void testEvalDoesWrapIOEs() throws Throwable {
+    AssertionError ex = intercept(AssertionError.class, "ioe",
+        () -> eval(() -> {
+          throw new IOException("ioe");
+        }));
+    Throwable cause = ex.getCause();
+    if (cause == null) {
+      throw ex;
+    }
+    if (!(cause instanceof IOException)) {
+      throw cause;
+    }
+  }
+
 }
 }

+ 57 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestRunJar.java

@@ -24,15 +24,24 @@ import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 import static org.mockito.Mockito.when;
 
 
 import java.io.File;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.IOException;
+import java.io.InputStream;
+import java.util.Random;
+import java.util.jar.JarEntry;
 import java.util.jar.JarOutputStream;
 import java.util.jar.JarOutputStream;
 import java.util.regex.Pattern;
 import java.util.regex.Pattern;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipEntry;
 
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.After;
+import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Before;
 import org.junit.Test;
 import org.junit.Test;
 
 
@@ -114,6 +123,54 @@ public class TestRunJar {
                new File(unjarDir, FOOBAZ_TXT).exists());
                new File(unjarDir, FOOBAZ_TXT).exists());
   }
   }
 
 
+  private File generateBigJar(File dir) throws Exception {
+    File file = new File(dir, "job.jar");
+    try(JarOutputStream stream = new JarOutputStream(
+        new FileOutputStream(file))) {
+      Random r = new Random(100);
+      for (int i = 0; i < 10; ++i) {
+        JarEntry entry = new JarEntry(
+            ((i % 2 == 0) ? "dir/" : "") + "f" + Integer.toString(i));
+        stream.putNextEntry(entry);
+        for (int j=0; j < 756; ++j) {
+          stream.write(r.nextInt() & 0xFF);
+        }
+        stream.closeEntry();
+      }
+      stream.close();
+    }
+    return file;
+  }
+
+  /**
+   * Test unjarring a big file. This checks appending the remainder of the file
+   * to the tee output stream in RunJar.unJarAndSave.
+   */
+  @SuppressWarnings("deprecation")
+  @Test
+  public void testBigJar() throws Exception {
+    Random r = new Random(System.currentTimeMillis());
+    File dir = new File(TEST_ROOT_DIR, Long.toHexString(r.nextLong()));
+    Assert.assertTrue(dir.mkdirs());
+    File input = generateBigJar(dir);
+    File output = new File(dir, "job2.jar");
+    try {
+      try (InputStream is = new FileInputStream(input)) {
+        RunJar.unJarAndSave(is, dir, "job2.jar", Pattern.compile(".*"));
+      }
+      Assert.assertEquals(input.length(), output.length());
+      for (int i = 0; i < 10; ++i) {
+        File subdir = new File(dir, ((i % 2 == 0) ? "dir/" : ""));
+        File f = new File(subdir, "f" + Integer.toString(i));
+        Assert.assertEquals(756, f.length());
+      }
+    } finally {
+      // Clean up
+      FileSystem fs = LocalFileSystem.getLocal(new Configuration());
+      fs.delete(new Path(dir.getAbsolutePath()), true);
+    }
+  }
+
   @Test
   @Test
   public void testUnJarDoesNotLooseLastModify() throws Exception {
   public void testUnJarDoesNotLooseLastModify() throws Exception {
     File unjarDir = getUnjarDir("unjar-lastmod");
     File unjarDir = getUnjarDir("unjar-lastmod");

+ 102 - 0
hadoop-common-project/hadoop-common/src/test/scripts/start-build-env.bats

@@ -0,0 +1,102 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load hadoop-functions_test_helper
+
+# Mock docker command
+docker () {
+  if [ "$1" = "-v" ]; then
+    shift
+    echo Docker version ${DCKR_MOCK_VER:?}
+  elif [ "$1" = run ]; then
+    shift
+    until [ $# -eq 0 ]; do
+      if [ "$1" = -v ]; then
+        shift
+        echo "$1"|awk -F':' '{if (NF == 3 && $3 == "z")
+                  printf "Mounted %s with z option.\n", $1
+                              else if (NF == 2)
+                  printf "Mounted %s without z option.\n", $1}'
+      fi
+      shift
+    done
+  fi
+}
+export -f docker
+export DCKR_MOCK_VER
+
+# Mock a SELinux enabled system
+enable_selinux () {
+  mkdir -p "${TMP}/bin"
+  echo true >"${TMP}/bin"/selinuxenabled
+  chmod a+x "${TMP}/bin"/selinuxenabled
+  if [ "${PATH#${TMP}/bin}" = "${PATH}" ]; then
+    PATH="${TMP}/bin":"$PATH"
+  fi
+}
+
+setup_user () {
+  if [ -z "$(printenv USER)" ]; then
+    if [ -z "$USER" ]; then
+      USER=${HOME##*/}
+    fi
+    export USER
+  fi
+}
+
+# Mock stat command as used in start-build-env.sh
+stat () {
+  if [ "$1" = --printf='%C' -a $# -eq 2 ]; then
+    printf 'mock_u:mock_r:mock_t:s0'
+  else
+    command stat "$@"
+  fi
+}
+export -f stat
+
+# Verify that host directories get mounted without z option
+# and INFO messages get printed out
+@test "start-build-env.sh (Docker without z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+    skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.4
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[0]} == "INFO: SELinux is enabled." ]]
+  [[ ${lines[1]} =~ \
+     "Mounted ".*" may not be accessible to the container." ]]
+  [[ ${lines[2]} == \
+     "INFO: If so, on the host, run the following command:" ]]
+  [[ ${lines[3]} =~ "# chcon -Rt svirt_sandbox_file_t " ]]
+  [[ ${lines[-2]} =~ "Mounted ".*" without z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" without z option." ]]
+}
+
+# Verify that host directories get mounted with z option
+@test "start-build-env.sh (Docker with z mount option)" {
+  if [ "$(uname -s)" != "Linux" ]; then
+    skip "Not on Linux platform"
+  fi
+  enable_selinux
+  setup_user
+  DCKR_MOCK_VER=1.7
+  run "${BATS_TEST_DIRNAME}/../../../../../start-build-env.sh"
+  [ "$status" -eq 0 ]
+  [[ ${lines[-2]} =~ "Mounted ".*" with z option." ]]
+  [[ ${lines[-1]} =~ "Mounted ".*" with z option." ]]
+}

+ 2 - 2
hadoop-common-project/hadoop-kms/pom.xml

@@ -22,11 +22,11 @@
   <parent>
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   </parent>
   <artifactId>hadoop-kms</artifactId>
   <artifactId>hadoop-kms</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
   <packaging>jar</packaging>
 
 
   <name>Apache Hadoop KMS</name>
   <name>Apache Hadoop KMS</name>

+ 2 - 2
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.crypto.key.kms.server;
 package org.apache.hadoop.crypto.key.kms.server;
 
 
 import com.google.common.base.Preconditions;
 import com.google.common.base.Preconditions;
-import com.google.common.base.Stopwatch;
 import org.apache.hadoop.util.KMSUtil;
 import org.apache.hadoop.util.KMSUtil;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -32,6 +31,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
 import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
 import org.apache.hadoop.security.token.delegation.web.HttpUserGroupInformation;
+import org.apache.hadoop.util.StopWatch;
 import org.slf4j.Logger;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.LoggerFactory;
 
 
@@ -555,7 +555,7 @@ public class KMS {
       throws Exception {
       throws Exception {
     LOG.trace("Entering reencryptEncryptedKeys method.");
     LOG.trace("Entering reencryptEncryptedKeys method.");
     try {
     try {
-      final Stopwatch sw = new Stopwatch().start();
+      final StopWatch sw = new StopWatch().start();
       checkNotEmpty(name, "name");
       checkNotEmpty(name, "name");
       checkNotNull(jsonPayload, "jsonPayload");
       checkNotNull(jsonPayload, "jsonPayload");
       final UserGroupInformation user = HttpUserGroupInformation.get();
       final UserGroupInformation user = HttpUserGroupInformation.get();

+ 2 - 2
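The KMS change above swaps Guava's `Stopwatch` for Hadoop's own `org.apache.hadoop.util.StopWatch`, presumably to avoid relying on a Guava constructor that newer Guava releases no longer expose. A minimal sketch of the replacement, assuming `start()`, `stop()`, and `now(TimeUnit)` behave as their names suggest:

```java
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.util.StopWatch;

public class StopWatchExample {
  public static void main(String[] args) throws InterruptedException {
    // Mirrors the "new StopWatch().start()" call in the KMS diff above.
    StopWatch sw = new StopWatch().start();
    Thread.sleep(50);
    sw.stop();
    System.out.println("elapsed ms: " + sw.now(TimeUnit.MILLISECONDS));
  }
}
```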
hadoop-common-project/hadoop-minikdc/pom.xml

@@ -18,12 +18,12 @@
   <parent>
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-minikdc</artifactId>
   <artifactId>hadoop-minikdc</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop MiniKDC</description>
   <description>Apache Hadoop MiniKDC</description>
   <name>Apache Hadoop MiniKDC</name>
   <name>Apache Hadoop MiniKDC</name>
   <packaging>jar</packaging>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-nfs/pom.xml

@@ -20,11 +20,11 @@
   <parent>
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   </parent>
   <artifactId>hadoop-nfs</artifactId>
   <artifactId>hadoop-nfs</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <packaging>jar</packaging>
   <packaging>jar</packaging>
 
 
   <name>Apache Hadoop NFS</name>
   <name>Apache Hadoop NFS</name>

+ 2 - 2
hadoop-common-project/pom.xml

@@ -20,11 +20,11 @@
   <parent>
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   </parent>
   <artifactId>hadoop-common-project</artifactId>
   <artifactId>hadoop-common-project</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Common Project</description>
   <description>Apache Hadoop Common Project</description>
   <name>Apache Hadoop Common Project</name>
   <name>Apache Hadoop Common Project</name>
   <packaging>pom</packaging>
   <packaging>pom</packaging>

+ 2 - 2
hadoop-dist/pom.xml

@@ -20,11 +20,11 @@
   <parent>
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   </parent>
   <artifactId>hadoop-dist</artifactId>
   <artifactId>hadoop-dist</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop Distribution</description>
   <description>Apache Hadoop Distribution</description>
   <name>Apache Hadoop Distribution</name>
   <name>Apache Hadoop Distribution</name>
   <packaging>jar</packaging>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-client/pom.xml

@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   </parent>
   <artifactId>hadoop-hdfs-client</artifactId>
   <artifactId>hadoop-hdfs-client</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Client</description>
   <description>Apache Hadoop HDFS Client</description>
   <name>Apache Hadoop HDFS Client</name>
   <name>Apache Hadoop HDFS Client</name>
   <packaging>jar</packaging>
   <packaging>jar</packaging>

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED_DEFAULT;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.FS_CLIENT_TOPOLOGY_RESOLUTION_ENABLED_DEFAULT;
 
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashMap;
 import java.util.List;
 import java.util.List;
@@ -238,7 +239,7 @@ public class ClientContext {
     return byteArrayManager;
     return byteArrayManager;
   }
   }
 
 
-  public int getNetworkDistance(DatanodeInfo datanodeInfo) {
+  public int getNetworkDistance(DatanodeInfo datanodeInfo) throws IOException {
     // If applications disable the feature or the client machine can't
     // If applications disable the feature or the client machine can't
     // resolve its network location, clientNode will be set to null.
     // resolve its network location, clientNode will be set to null.
     if (clientNode == null) {
     if (clientNode == null) {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java

@@ -1288,8 +1288,8 @@ public class DFSStripedOutputStream extends DFSOutputStream
       int bgIndex = entry.getKey();
       int bgIndex = entry.getKey();
       int corruptBlockCount = entry.getValue();
       int corruptBlockCount = entry.getValue();
       StringBuilder sb = new StringBuilder();
       StringBuilder sb = new StringBuilder();
-      sb.append("Block group <").append(bgIndex).append("> has ")
-          .append(corruptBlockCount).append(" corrupt blocks.");
+      sb.append("Block group <").append(bgIndex).append("> failed to write ")
+          .append(corruptBlockCount).append(" blocks.");
       if (corruptBlockCount == numAllBlocks - numDataBlocks) {
       if (corruptBlockCount == numAllBlocks - numDataBlocks) {
         sb.append(" It's at high risk of losing data.");
         sb.append(" It's at high risk of losing data.");
       }
       }

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java

@@ -550,7 +550,11 @@ public class DFSUtilClient {
   private static final Map<String, Boolean> localAddrMap = Collections
   private static final Map<String, Boolean> localAddrMap = Collections
       .synchronizedMap(new HashMap<String, Boolean>());
       .synchronizedMap(new HashMap<String, Boolean>());
 
 
-  public static boolean isLocalAddress(InetSocketAddress targetAddr) {
+  public static boolean isLocalAddress(InetSocketAddress targetAddr)
+      throws IOException {
+    if (targetAddr.isUnresolved()) {
+      throw new IOException("Unresolved host: " + targetAddr);
+    }
     InetAddress addr = targetAddr.getAddress();
     InetAddress addr = targetAddr.getAddress();
     Boolean cached = localAddrMap.get(addr.getHostAddress());
     Boolean cached = localAddrMap.get(addr.getHostAddress());
     if (cached != null) {
     if (cached != null) {

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java

@@ -73,7 +73,7 @@ public interface HdfsClientConfigKeys {
   int     DFS_NAMENODE_HTTPS_PORT_DEFAULT = 9871;
   int     DFS_NAMENODE_HTTPS_PORT_DEFAULT = 9871;
   String  DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
   String  DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
   String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
   String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
-  int DFS_NAMENODE_RPC_PORT_DEFAULT = 9820;
+  int DFS_NAMENODE_RPC_PORT_DEFAULT = 8020;
   String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
   String DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY =
       "dfs.namenode.kerberos.principal";
       "dfs.namenode.kerberos.principal";
   String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
   String  DFS_CLIENT_WRITE_PACKET_SIZE_KEY = "dfs.client-write-packet-size";
@@ -127,6 +127,9 @@ public interface HdfsClientConfigKeys {
   boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false;
   boolean DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT = false;
   String  DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
   String  DFS_DOMAIN_SOCKET_PATH_KEY = "dfs.domain.socket.path";
   String  DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";
   String  DFS_DOMAIN_SOCKET_PATH_DEFAULT = "";
+  String DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_KEY =
+      "dfs.domain.socket.disable.interval.seconds";
+  long DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_DEFAULT = 600;
   String  DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS =
   String  DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS =
       "dfs.short.circuit.shared.memory.watcher.interrupt.check.ms";
       "dfs.short.circuit.shared.memory.watcher.interrupt.check.ms";
   int     DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT =
   int     DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT =

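The new `dfs.domain.socket.disable.interval.seconds` key controls how long a client keeps a failing short-circuit domain-socket path disabled (600 seconds by default; the BlockReaderFactory change further below treats 0 as "not disabled"). A hedged sketch of overriding it on the client side; since `DfsClientConf` reads the value at construction time, it has to be set before the DFS client is created:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class DomainSocketDisableIntervalExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Shorten the disable window from the 600-second default to 60 seconds.
    conf.setLong(
        HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_KEY, 60);
    System.out.println(conf.getLong(
        HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_KEY, 600));
  }
}
```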
+ 34 - 22
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java

@@ -17,6 +17,7 @@
  */
  */
 package org.apache.hadoop.hdfs.client.impl;
 package org.apache.hadoop.hdfs.client.impl;
 
 
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_KEY;
 import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.USE_RECEIPT_VERIFICATION;
 import static org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitFdResponse.USE_RECEIPT_VERIFICATION;
 
 
 import java.io.BufferedOutputStream;
 import java.io.BufferedOutputStream;
@@ -356,28 +357,32 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       return reader;
       return reader;
     }
     }
     final ShortCircuitConf scConf = conf.getShortCircuitConf();
     final ShortCircuitConf scConf = conf.getShortCircuitConf();
-    if (scConf.isShortCircuitLocalReads() && allowShortCircuitLocalReads) {
-      if (clientContext.getUseLegacyBlockReaderLocal()) {
-        reader = getLegacyBlockReaderLocal();
-        if (reader != null) {
-          LOG.trace("{}: returning new legacy block reader local.", this);
-          return reader;
+    try {
+      if (scConf.isShortCircuitLocalReads() && allowShortCircuitLocalReads) {
+        if (clientContext.getUseLegacyBlockReaderLocal()) {
+          reader = getLegacyBlockReaderLocal();
+          if (reader != null) {
+            LOG.trace("{}: returning new legacy block reader local.", this);
+            return reader;
+          }
+        } else {
+          reader = getBlockReaderLocal();
+          if (reader != null) {
+            LOG.trace("{}: returning new block reader local.", this);
+            return reader;
+          }
         }
         }
-      } else {
-        reader = getBlockReaderLocal();
+      }
+      if (scConf.isDomainSocketDataTraffic()) {
+        reader = getRemoteBlockReaderFromDomain();
         if (reader != null) {
         if (reader != null) {
-          LOG.trace("{}: returning new block reader local.", this);
+          LOG.trace("{}: returning new remote block reader using UNIX domain "
+              + "socket on {}", this, pathInfo.getPath());
           return reader;
           return reader;
         }
         }
       }
       }
-    }
-    if (scConf.isDomainSocketDataTraffic()) {
-      reader = getRemoteBlockReaderFromDomain();
-      if (reader != null) {
-        LOG.trace("{}: returning new remote block reader using UNIX domain "
-            + "socket on {}", this, pathInfo.getPath());
-        return reader;
-      }
+    } catch (IOException e) {
+      LOG.debug("Block read failed. Getting remote block reader using TCP", e);
     }
     }
     Preconditions.checkState(!DFSInputStream.tcpReadsDisabledForTesting,
     Preconditions.checkState(!DFSInputStream.tcpReadsDisabledForTesting,
         "TCP reads were disabled for testing, but we failed to " +
         "TCP reads were disabled for testing, but we failed to " +
@@ -468,7 +473,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
     return null;
     return null;
   }
   }
 
 
-  private BlockReader getBlockReaderLocal() throws InvalidToken {
+  private BlockReader getBlockReaderLocal() throws IOException {
     LOG.trace("{}: trying to construct a BlockReaderLocal for short-circuit "
     LOG.trace("{}: trying to construct a BlockReaderLocal for short-circuit "
         + " reads.", this);
         + " reads.", this);
     if (pathInfo == null) {
     if (pathInfo == null) {
@@ -644,10 +649,17 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       LOG.debug("{}:{}", this, msg);
       LOG.debug("{}:{}", this, msg);
       return new ShortCircuitReplicaInfo(new InvalidToken(msg));
       return new ShortCircuitReplicaInfo(new InvalidToken(msg));
     default:
     default:
-      LOG.warn(this + ": unknown response code " + resp.getStatus() +
-          " while attempting to set up short-circuit access. " +
-          resp.getMessage() + ". Disabling short-circuit read for DataNode "
-          + datanode + " temporarily.");
+      final long expiration =
+          clientContext.getDomainSocketFactory().getPathExpireSeconds();
+      String disableMsg = "disabled temporarily for " + expiration + " seconds";
+      if (expiration == 0) {
+        disableMsg = "not disabled";
+      }
+      LOG.warn("{}: unknown response code {} while attempting to set up "
+              + "short-circuit access. {}. Short-circuit read for "
+              + "DataNode {} is {} based on {}.",
+          this, resp.getStatus(), resp.getMessage(), datanode,
+          disableMsg, DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_KEY);
       clientContext.getDomainSocketFactory()
       clientContext.getDomainSocketFactory()
           .disableShortCircuitForPath(pathInfo.getPath());
           .disableShortCircuitForPath(pathInfo.getPath());
       return null;
       return null;

+ 18 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java

@@ -68,6 +68,8 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_USE_
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_REPLICATION_DEFAULT;
@@ -602,6 +604,7 @@ public class DfsClientConf {
     private final long shortCircuitMmapCacheExpiryMs;
     private final long shortCircuitMmapCacheExpiryMs;
     private final long shortCircuitMmapCacheRetryTimeout;
     private final long shortCircuitMmapCacheRetryTimeout;
     private final long shortCircuitCacheStaleThresholdMs;
     private final long shortCircuitCacheStaleThresholdMs;
+    private final long domainSocketDisableIntervalSeconds;
 
 
     private final long keyProviderCacheExpiryMs;
     private final long keyProviderCacheExpiryMs;
 
 
@@ -679,6 +682,11 @@ public class DfsClientConf {
       shortCircuitSharedMemoryWatcherInterruptCheckMs = conf.getInt(
       shortCircuitSharedMemoryWatcherInterruptCheckMs = conf.getInt(
           DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
           DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS,
           DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
           DFS_SHORT_CIRCUIT_SHARED_MEMORY_WATCHER_INTERRUPT_CHECK_MS_DEFAULT);
+      domainSocketDisableIntervalSeconds = conf.getLong(
+          DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_KEY,
+          DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_DEFAULT);
+      Preconditions.checkArgument(domainSocketDisableIntervalSeconds >= 0,
+          DFS_DOMAIN_SOCKET_DISABLE_INTERVAL_SECOND_KEY + " can't be negative.");
 
 
       keyProviderCacheExpiryMs = conf.getLong(
       keyProviderCacheExpiryMs = conf.getLong(
           DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS,
           DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_MS,
@@ -793,6 +801,13 @@ public class DfsClientConf {
       return shortCircuitCacheStaleThresholdMs;
       return shortCircuitCacheStaleThresholdMs;
     }
     }
 
 
+    /**
+     * @return the domainSocketDisableIntervalSeconds
+     */
+    public long getDomainSocketDisableIntervalSeconds() {
+      return domainSocketDisableIntervalSeconds;
+    }
+
     /**
     /**
      * @return the keyProviderCacheExpiryMs
      * @return the keyProviderCacheExpiryMs
      */
      */
@@ -827,7 +842,9 @@ public class DfsClientConf {
           + ", shortCircuitSharedMemoryWatcherInterruptCheckMs = "
           + ", shortCircuitSharedMemoryWatcherInterruptCheckMs = "
           + shortCircuitSharedMemoryWatcherInterruptCheckMs
           + shortCircuitSharedMemoryWatcherInterruptCheckMs
           + ", keyProviderCacheExpiryMs = "
           + ", keyProviderCacheExpiryMs = "
-          + keyProviderCacheExpiryMs;
+          + keyProviderCacheExpiryMs
+          + ", domainSocketDisableIntervalSeconds = "
+          + domainSocketDisableIntervalSeconds;
     }
     }
   }
   }
 }
 }

+ 8 - 4
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java

@@ -203,11 +203,15 @@ public class SaslDataTransferClient {
       DataEncryptionKeyFactory encryptionKeyFactory,
       DataEncryptionKeyFactory encryptionKeyFactory,
       Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
       Token<BlockTokenIdentifier> accessToken, DatanodeID datanodeId)
       throws IOException {
       throws IOException {
-    if (!trustedChannelResolver.isTrusted() &&
-        !trustedChannelResolver.isTrusted(addr)) {
+    boolean localTrusted = trustedChannelResolver.isTrusted();
+    boolean remoteTrusted = trustedChannelResolver.isTrusted(addr);
+    LOG.debug("SASL encryption trust check: localHostTrusted = {}, "
+        + "remoteHostTrusted = {}", localTrusted, remoteTrusted);
+
+    if (!localTrusted || !remoteTrusted) {
       // The encryption key factory only returns a key if encryption is enabled.
       // The encryption key factory only returns a key if encryption is enabled.
-      DataEncryptionKey encryptionKey =
-          encryptionKeyFactory.newDataEncryptionKey();
+      DataEncryptionKey encryptionKey = encryptionKeyFactory
+          .newDataEncryptionKey();
       return send(addr, underlyingOut, underlyingIn, encryptionKey, accessToken,
       return send(addr, underlyingOut, underlyingIn, encryptionKey, accessToken,
           datanodeId);
           datanodeId);
     } else {
     } else {

+ 12 - 5
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java

@@ -92,10 +92,8 @@ public class DomainSocketFactory {
   /**
   /**
    * Information about domain socket paths.
    * Information about domain socket paths.
    */
    */
-  final Cache<String, PathState> pathMap =
-      CacheBuilder.newBuilder()
-      .expireAfterWrite(10, TimeUnit.MINUTES)
-      .build();
+  private final long pathExpireSeconds;
+  private final Cache<String, PathState> pathMap;
 
 
   public DomainSocketFactory(ShortCircuitConf conf) {
   public DomainSocketFactory(ShortCircuitConf conf) {
     final String feature;
     final String feature;
@@ -121,6 +119,10 @@ public class DomainSocketFactory {
         LOG.debug(feature + " is enabled.");
         LOG.debug(feature + " is enabled.");
       }
       }
     }
     }
+
+    pathExpireSeconds = conf.getDomainSocketDisableIntervalSeconds();
+    pathMap = CacheBuilder.newBuilder()
+        .expireAfterWrite(pathExpireSeconds, TimeUnit.SECONDS).build();
   }
   }
 
 
   /**
   /**
@@ -131,7 +133,8 @@ public class DomainSocketFactory {
    *
    *
    * @return             Information about the socket path.
    * @return             Information about the socket path.
    */
    */
-  public PathInfo getPathInfo(InetSocketAddress addr, ShortCircuitConf conf) {
+  public PathInfo getPathInfo(InetSocketAddress addr, ShortCircuitConf conf)
+      throws IOException {
     // If there is no domain socket path configured, we can't use domain
     // If there is no domain socket path configured, we can't use domain
     // sockets.
     // sockets.
     if (conf.getDomainSocketPath().isEmpty()) return PathInfo.NOT_CONFIGURED;
     if (conf.getDomainSocketPath().isEmpty()) return PathInfo.NOT_CONFIGURED;
@@ -192,4 +195,8 @@ public class DomainSocketFactory {
   public void clearPathMap() {
   public void clearPathMap() {
     pathMap.invalidateAll();
     pathMap.invalidateAll();
   }
   }
+
+  public long getPathExpireSeconds() {
+    return pathExpireSeconds;
+  }
 }
 }

+ 15 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java

@@ -278,7 +278,7 @@ public class ShortCircuitCache implements Closeable {
    * Maximum total size of the cache, including both mmapped and
    * Maximum total size of the cache, including both mmapped and
   * non-mmapped elements.
   * non-mmapped elements.
    */
    */
-  private final int maxTotalSize;
+  private int maxTotalSize;
 
 
   /**
   /**
    * Non-mmaped elements older than this will be closed.
    * Non-mmaped elements older than this will be closed.
@@ -369,6 +369,11 @@ public class ShortCircuitCache implements Closeable {
    return staleThresholdMs;
  }

+  @VisibleForTesting
+  public void setMaxTotalSize(int maxTotalSize) {
+    this.maxTotalSize = maxTotalSize;
+  }
+
  /**
   * Increment the reference count of a replica, and remove it from any free
   * list it may be in.
@@ -1025,4 +1030,13 @@ public class ShortCircuitCache implements Closeable {
  public DfsClientShmManager getDfsClientShmManager() {
    return shmManager;
  }
+
+  /**
+   * Can be used in testing to verify whether a read went through SCR, after
+   * the read is done and before the stream is closed.
+   */
+  @VisibleForTesting
+  public int getReplicaInfoMapSize() {
+    return replicaInfoMap.size();
+  }
}

+ 10 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -1869,8 +1869,17 @@ public class WebHdfsFileSystem extends FileSystem
    try {
      keyProviderUri = getServerDefaults().getKeyProviderUri();
    } catch (UnsupportedOperationException e) {
-      // This means server doesn't supports GETSERVERDEFAULTS call.
+      // This means server doesn't support GETSERVERDEFAULTS call.
      // Do nothing, let keyProviderUri = null.
+    } catch (RemoteException e) {
+      if (e.getClassName() != null &&
+          e.getClassName().equals("java.lang.IllegalArgumentException")) {
+        // See HDFS-13100.
+        // This means server doesn't support GETSERVERDEFAULTS call.
+        // Do nothing, let keyProviderUri = null.
+      } else {
+        throw e;
+      }
    }
    return HdfsKMSUtil.getKeyProviderUri(ugi, getUri(), keyProviderUri,
        getConf());
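The WebHDFS hunk treats a RemoteException wrapping java.lang.IllegalArgumentException the same as "server has no GETSERVERDEFAULTS". A small, self-contained sketch of that unwrapping check (the helper name is invented for illustration):

    import org.apache.hadoop.ipc.RemoteException;

    public final class RemoteExceptionCheckSketch {
      // Returns true when the server-side class indicates an old NameNode
      // that cannot answer GETSERVERDEFAULTS (see HDFS-13100).
      static boolean isUnsupportedServerDefaults(RemoteException e) {
        return e.getClassName() != null
            && e.getClassName().equals("java.lang.IllegalArgumentException");
      }

      public static void main(String[] args) {
        RemoteException e =
            new RemoteException("java.lang.IllegalArgumentException", "no such op");
        System.out.println(isUnsupportedServerDefaults(e)); // true
      }
    }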

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRequestHedgingProxyProvider.java

@@ -70,10 +70,10 @@ public class TestRequestHedgingProxyProvider {
        HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns, "nn1,nn2");
    conf.set(
        HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn1",
-        "machine1.foo.bar:9820");
+        "machine1.foo.bar:8020");
    conf.set(
        HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn2",
-        "machine2.foo.bar:9820");
+        "machine2.foo.bar:8020");
  }

  @Test
@@ -294,7 +294,7 @@ public class TestRequestHedgingProxyProvider {
    conf.set(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + ns,
            "nn1,nn2,nn3");
    conf.set(HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + ns + ".nn3",
-            "machine3.foo.bar:9820");
+            "machine3.foo.bar:8020");

    final AtomicInteger counter = new AtomicInteger(0);
    final int[] isGood = {1};

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -22,11 +22,11 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
  <packaging>jar</packaging>

  <name>Apache Hadoop HttpFS</name>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml

@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
  </parent>
  <artifactId>hadoop-hdfs-native-client</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
  <description>Apache Hadoop HDFS Native Client</description>
  <name>Apache Hadoop HDFS Native Client</name>
  <packaging>jar</packaging>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml

@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
  </parent>
  <artifactId>hadoop-hdfs-nfs</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
  <description>Apache Hadoop HDFS-NFS</description>
  <name>Apache Hadoop HDFS-NFS</name>
  <packaging>jar</packaging>

+ 10 - 15
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java

@@ -17,8 +17,6 @@
 */
 package org.apache.hadoop.hdfs.nfs.nfs3;

-import org.apache.commons.logging.LogFactory;
-
 import java.io.IOException;
 import java.net.URI;
 import java.nio.file.FileSystemException;
@@ -32,7 +30,6 @@ import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeUnit;

 import com.google.common.base.Preconditions;
-import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -50,12 +47,15 @@ import com.google.common.cache.CacheLoader;
 import com.google.common.cache.LoadingCache;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 /**
  * A cache saves DFSClient objects for different users.
  */
 class DFSClientCache {
-  private static final Log LOG = LogFactory.getLog(DFSClientCache.class);
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DFSClientCache.class);
   /**
    * Cache that maps User id to the corresponding DFSClient.
    */
@@ -169,8 +169,8 @@ class DFSClientCache {
      URI value = namenodeUriMap.get(namenodeId);
      // if a unique nnid, add it to the map
      if (value == null) {
-        LOG.info("Added export:" + exportPath + " FileSystem URI:" + exportURI
-              + " with namenodeId:" + namenodeId);
+        LOG.info("Added export: {} FileSystem URI: {} with namenodeId: {}",
+            exportPath, exportPath, namenodeId);
        namenodeUriMap.put(namenodeId, exportURI);
      } else {
        // if the nnid already exists, it better be the for the same namenode
@@ -194,7 +194,7 @@ class DFSClientCache {
      try {
        closeAll(true);
      } catch (IOException e) {
-        LOG.info("DFSClientCache.closeAll() threw an exception:\n", e);
+        LOG.info("DFSClientCache.closeAll() threw an exception", e);
      }
    }
  }
@@ -269,10 +269,7 @@ class DFSClientCache {
 
 
    UserGroupInformation ugi =
            UserGroupInformation.createProxyUser(effectiveUser, realUser);
-    if (LOG.isDebugEnabled()){
-      LOG.debug(String.format("Created ugi:" +
-              " %s for username: %s", ugi, effectiveUser));
-    }
+    LOG.debug("Created ugi: {} for username: {}", ugi, effectiveUser);
    return ugi;
  }

@@ -329,8 +326,7 @@ class DFSClientCache {
    try {
      client = clientCache.get(new DfsClientKey(userName, namenodeId));
    } catch (ExecutionException e) {
-      LOG.error("Failed to create DFSClient for user:" + userName + " Cause:"
-          + e);
+      LOG.error("Failed to create DFSClient for user: {}", userName, e);
    }
    return client;
  }
@@ -343,8 +339,7 @@ class DFSClientCache {
    try {
      s = inputstreamCache.get(k);
    } catch (ExecutionException e) {
-      LOG.warn("Failed to create DFSInputStream for user:" + userName
-          + " Cause:" + e);
+      LOG.warn("Failed to create DFSInputStream for user: {}", userName, e);
    }
    return s;
  }
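Most of the NFS-gateway hunks that follow are mechanical conversions from commons-logging string concatenation to SLF4J parameterized logging. A self-contained sketch of the two idioms they rely on: "{}" placeholders are only rendered when the level is enabled, and a Throwable passed as the final argument (with no matching placeholder) still gets its stack trace printed.

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public final class Slf4jConversionSketch {
      private static final Logger LOG =
          LoggerFactory.getLogger(Slf4jConversionSketch.class);

      public static void main(String[] args) {
        long offset = 42L;
        // No need for LOG.isDebugEnabled(): the message is formatted lazily.
        LOG.debug("requested offset={} and current offset={}", offset, offset + 1);
        // Exception as the last argument: logged with its stack trace, no placeholder.
        LOG.warn("Failed to create DFSInputStream for user: {}", "nfsuser",
            new java.io.IOException("simulated failure"));
      }
    }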

+ 127 - 187
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java

@@ -31,8 +31,6 @@ import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicLong;
 
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
@@ -61,13 +59,15 @@ import org.jboss.netty.channel.Channel;
 
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 /**
 /**
  * OpenFileCtx saves the context of one HDFS file output stream. Access to it is
  * OpenFileCtx saves the context of one HDFS file output stream. Access to it is
  * synchronized by its member lock.
  * synchronized by its member lock.
  */
  */
 class OpenFileCtx {
 class OpenFileCtx {
-  public static final Log LOG = LogFactory.getLog(OpenFileCtx.class);
+  public static final Logger LOG = LoggerFactory.getLogger(OpenFileCtx.class);
   
   
   // Pending writes water mark for dump, 1MB
   // Pending writes water mark for dump, 1MB
   private static long DUMP_WRITE_WATER_MARK = 1024 * 1024;
   private static long DUMP_WRITE_WATER_MARK = 1024 * 1024;
@@ -210,10 +210,8 @@ class OpenFileCtx {
   /** Increase or decrease the memory occupation of non-sequential writes */
   /** Increase or decrease the memory occupation of non-sequential writes */
   private long updateNonSequentialWriteInMemory(long count) {
   private long updateNonSequentialWriteInMemory(long count) {
     long newValue = nonSequentialWriteInMemory.addAndGet(count);
     long newValue = nonSequentialWriteInMemory.addAndGet(count);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Update nonSequentialWriteInMemory by " + count + " new value: "
-          + newValue);
-    }
+    LOG.debug("Update nonSequentialWriteInMemory by {} new value: {}",
+        count, newValue);
 
 
     Preconditions.checkState(newValue >= 0,
     Preconditions.checkState(newValue >= 0,
         "nonSequentialWriteInMemory is negative " + newValue
         "nonSequentialWriteInMemory is negative " + newValue
@@ -273,9 +271,7 @@ class OpenFileCtx {
   // Check if need to dump the new writes
   // Check if need to dump the new writes
   private void waitForDump() {
   private void waitForDump() {
     if (!enabledDump) {
     if (!enabledDump) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Do nothing, dump is disabled.");
-      }
+      LOG.debug("Do nothing, dump is disabled.");
       return;
       return;
     }
     }
 
 
@@ -286,9 +282,7 @@ class OpenFileCtx {
     // wake up the dumper thread to dump the data
     // wake up the dumper thread to dump the data
     synchronized (this) {
     synchronized (this) {
       if (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
       if (nonSequentialWriteInMemory.get() >= DUMP_WRITE_WATER_MARK) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Asking dumper to dump...");
-        }
+        LOG.debug("Asking dumper to dump...");
         if (dumpThread == null) {
         if (dumpThread == null) {
           dumpThread = new Daemon(new Dumper());
           dumpThread = new Daemon(new Dumper());
           dumpThread.start();
           dumpThread.start();
@@ -312,7 +306,7 @@ class OpenFileCtx {
     private void dump() {
     private void dump() {
       // Create dump outputstream for the first time
       // Create dump outputstream for the first time
       if (dumpOut == null) {
       if (dumpOut == null) {
-        LOG.info("Create dump file: " + dumpFilePath);
+        LOG.info("Create dump file: {}", dumpFilePath);
         File dumpFile = new File(dumpFilePath);
         File dumpFile = new File(dumpFilePath);
         try {
         try {
           synchronized (this) {
           synchronized (this) {
@@ -322,13 +316,14 @@ class OpenFileCtx {
             dumpOut = new FileOutputStream(dumpFile);
             dumpOut = new FileOutputStream(dumpFile);
           }
           }
         } catch (IOException e) {
         } catch (IOException e) {
-          LOG.error("Got failure when creating dump stream " + dumpFilePath, e);
+          LOG.error("Got failure when creating dump stream {}",
+              dumpFilePath, e);
           enabledDump = false;
           enabledDump = false;
           if (dumpOut != null) {
           if (dumpOut != null) {
             try {
             try {
               dumpOut.close();
               dumpOut.close();
             } catch (IOException e1) {
             } catch (IOException e1) {
-              LOG.error("Can't close dump stream " + dumpFilePath, e);
+              LOG.error("Can't close dump stream {}", dumpFilePath, e);
             }
             }
           }
           }
           return;
           return;
@@ -340,17 +335,15 @@ class OpenFileCtx {
         try {
         try {
           raf = new RandomAccessFile(dumpFilePath, "r");
           raf = new RandomAccessFile(dumpFilePath, "r");
         } catch (FileNotFoundException e) {
         } catch (FileNotFoundException e) {
-          LOG.error("Can't get random access to file " + dumpFilePath);
+          LOG.error("Can't get random access to file {}", dumpFilePath);
           // Disable dump
           // Disable dump
           enabledDump = false;
           enabledDump = false;
           return;
           return;
         }
         }
       }
       }
 
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Start dump. Before dump, nonSequentialWriteInMemory == "
-            + nonSequentialWriteInMemory.get());
-      }
+      LOG.debug("Start dump. Before dump, nonSequentialWriteInMemory == {}",
+            nonSequentialWriteInMemory.get());
 
 
       Iterator<OffsetRange> it = pendingWrites.keySet().iterator();
       Iterator<OffsetRange> it = pendingWrites.keySet().iterator();
       while (activeState && it.hasNext()
       while (activeState && it.hasNext()
@@ -367,18 +360,16 @@ class OpenFileCtx {
             updateNonSequentialWriteInMemory(-dumpedDataSize);
             updateNonSequentialWriteInMemory(-dumpedDataSize);
           }
           }
         } catch (IOException e) {
         } catch (IOException e) {
-          LOG.error("Dump data failed: " + writeCtx + " with error: " + e
-              + " OpenFileCtx state: " + activeState);
+          LOG.error("Dump data failed: {} OpenFileCtx state: {}",
+              writeCtx, activeState, e);
           // Disable dump
           // Disable dump
           enabledDump = false;
           enabledDump = false;
           return;
           return;
         }
         }
       }
       }
 
 
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("After dump, nonSequentialWriteInMemory == "
-            + nonSequentialWriteInMemory.get());
-      }
+      LOG.debug("After dump, nonSequentialWriteInMemory == {}",
+          nonSequentialWriteInMemory.get());
     }
     }
 
 
     @Override
     @Override
@@ -393,26 +384,22 @@ class OpenFileCtx {
               OpenFileCtx.this.notifyAll();
               OpenFileCtx.this.notifyAll();
               try {
               try {
                 OpenFileCtx.this.wait();
                 OpenFileCtx.this.wait();
-                if (LOG.isDebugEnabled()) {
-                  LOG.debug("Dumper woke up");
-                }
+                LOG.debug("Dumper woke up");
               } catch (InterruptedException e) {
               } catch (InterruptedException e) {
-                LOG.info("Dumper is interrupted, dumpFilePath= "
-                    + OpenFileCtx.this.dumpFilePath);
+                LOG.info("Dumper is interrupted, dumpFilePath = {}",
+                    OpenFileCtx.this.dumpFilePath);
               }
               }
             }
             }
           }
           }
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Dumper checking OpenFileCtx activeState: " + activeState
-                + " enabledDump: " + enabledDump);
-          }
+          LOG.debug("Dumper checking OpenFileCtx activeState: {} " +
+              "enabledDump: {}", activeState, enabledDump);
         } catch (Throwable t) {
         } catch (Throwable t) {
           // unblock threads with new request
           // unblock threads with new request
           synchronized (OpenFileCtx.this) {
           synchronized (OpenFileCtx.this) {
             OpenFileCtx.this.notifyAll();
             OpenFileCtx.this.notifyAll();
           }
           }
-          LOG.info("Dumper get Throwable: " + t + ". dumpFilePath: "
-              + OpenFileCtx.this.dumpFilePath, t);
+          LOG.info("Dumper got Throwable. dumpFilePath: {}",
+              OpenFileCtx.this.dumpFilePath, t);
           activeState = false;
           activeState = false;
         }
         }
       }
       }
@@ -428,8 +415,8 @@ class OpenFileCtx {
       return null;
       return null;
     } else {
     } else {
       if (xid != writeCtx.getXid()) {
       if (xid != writeCtx.getXid()) {
-        LOG.warn("Got a repeated request, same range, with a different xid: "
-            + xid + " xid in old request: " + writeCtx.getXid());
+        LOG.warn("Got a repeated request, same range, with a different xid: " +
+            "{} xid in old request: {}", xid, writeCtx.getXid());
         //TODO: better handling.
         //TODO: better handling.
       }
       }
       return writeCtx;  
       return writeCtx;  
@@ -441,8 +428,8 @@ class OpenFileCtx {
       IdMappingServiceProvider iug) {
       IdMappingServiceProvider iug) {
     
     
     if (!activeState) {
     if (!activeState) {
-      LOG.info("OpenFileCtx is inactive, fileId: "
-          + request.getHandle().dumpFileHandle());
+      LOG.info("OpenFileCtx is inactive, fileId: {}",
+          request.getHandle().dumpFileHandle());
       WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
       WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
       WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO,
           fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
           fileWcc, 0, request.getStableHow(), Nfs3Constant.WRITE_COMMIT_VERF);
@@ -460,15 +447,11 @@ class OpenFileCtx {
           xid);
           xid);
       if (existantWriteCtx != null) {
       if (existantWriteCtx != null) {
         if (!existantWriteCtx.getReplied()) {
         if (!existantWriteCtx.getReplied()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Repeated write request which hasn't been served: xid="
-                + xid + ", drop it.");
-          }
+          LOG.debug("Repeated write request which hasn't been served: " +
+                    "xid={}, drop it.", xid);
         } else {
         } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Repeated write request which is already served: xid="
-                + xid + ", resend response.");
-          }
+          LOG.debug("Repeated write request which is already served: xid={}" +
+              ", resend response.", xid);
           WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
           WccData fileWcc = new WccData(latestAttr.getWccAttr(), latestAttr);
           WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
           WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
               fileWcc, request.getCount(), request.getStableHow(),
               fileWcc, request.getCount(), request.getStableHow(),
@@ -489,13 +472,11 @@ class OpenFileCtx {
     long offset = request.getOffset();
     long offset = request.getOffset();
     int count = request.getCount();
     int count = request.getCount();
     long smallerCount = offset + count - cachedOffset;
     long smallerCount = offset + count - cachedOffset;
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(String.format("Got overwrite with appended data [%d-%d),"
-          + " current offset %d," + " drop the overlapped section [%d-%d)"
-          + " and append new data [%d-%d).", offset, (offset + count),
-          cachedOffset, offset, cachedOffset, cachedOffset, (offset
-              + count)));
-    }
+    LOG.debug("Got overwrite with appended data [{}-{}),"
+            + " current offset {}," + " drop the overlapped section [{}-{})"
+            + " and append new data [{}-{}).", offset, (offset + count),
+            cachedOffset, offset, cachedOffset, cachedOffset,
+        (offset + count));
     
     
     ByteBuffer data = request.getData();
     ByteBuffer data = request.getData();
     Preconditions.checkState(data.position() == 0,
     Preconditions.checkState(data.position() == 0,
@@ -538,10 +519,8 @@ class OpenFileCtx {
     long cachedOffset = nextOffset.get();
     long cachedOffset = nextOffset.get();
     int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
     int originalCount = WriteCtx.INVALID_ORIGINAL_COUNT;
     
     
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("requested offset=" + offset + " and current offset="
-          + cachedOffset);
-    }
+    LOG.debug("requested offset={} and current offset={}",
+        offset, cachedOffset);
 
 
     // Ignore write request with range below the current offset
     // Ignore write request with range below the current offset
     if (offset + count <= cachedOffset) {
     if (offset + count <= cachedOffset) {
@@ -576,8 +555,8 @@ class OpenFileCtx {
     
     
     // Fail non-append call
     // Fail non-append call
     if (offset < cachedOffset) {
     if (offset < cachedOffset) {
-      LOG.warn("(offset,count,nextOffset): " + "(" + offset + "," + count + ","
-          + nextOffset + ")");
+      LOG.warn("(offset,count,nextOffset): ({},{},{})",
+          offset, count, nextOffset);
       return null;
       return null;
     } else {
     } else {
       DataState dataState = offset == cachedOffset ? WriteCtx.DataState.NO_DUMP
       DataState dataState = offset == cachedOffset ? WriteCtx.DataState.NO_DUMP
@@ -586,10 +565,8 @@ class OpenFileCtx {
           request.getOffset(), request.getCount(), originalCount,
           request.getOffset(), request.getCount(), originalCount,
           request.getStableHow(), request.getData(), channel, xid, false,
           request.getStableHow(), request.getData(), channel, xid, false,
           dataState);
           dataState);
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Add new write to the list with nextOffset " + cachedOffset
-            + " and requested offset=" + offset);
-      }
+      LOG.debug("Add new write to the list with nextOffset {}" +
+          " and requested offset={}", cachedOffset, offset);
       if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
       if (writeCtx.getDataState() == WriteCtx.DataState.ALLOW_DUMP) {
         // update the memory size
         // update the memory size
         updateNonSequentialWriteInMemory(count);
         updateNonSequentialWriteInMemory(count);
@@ -598,14 +575,12 @@ class OpenFileCtx {
       WriteCtx oldWriteCtx = checkRepeatedWriteRequest(request, channel, xid);
       WriteCtx oldWriteCtx = checkRepeatedWriteRequest(request, channel, xid);
       if (oldWriteCtx == null) {
       if (oldWriteCtx == null) {
         pendingWrites.put(new OffsetRange(offset, offset + count), writeCtx);
         pendingWrites.put(new OffsetRange(offset, offset + count), writeCtx);
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("New write buffered with xid " + xid + " nextOffset "
-              + cachedOffset + " req offset=" + offset + " mapsize="
-              + pendingWrites.size());
-        }
+        LOG.debug("New write buffered with xid {} nextOffset {}" +
+            "req offset={} mapsize={}",
+            xid, cachedOffset, offset, pendingWrites.size());
       } else {
       } else {
-        LOG.warn("Got a repeated request, same range, with xid: " + xid
-            + " nextOffset " + +cachedOffset + " req offset=" + offset);
+        LOG.warn("Got a repeated request, same range, with xid: " +
+            "{} nextOffset {} req offset={}", xid, cachedOffset, offset);
       }
       }
       return writeCtx;
       return writeCtx;
     }
     }
@@ -625,9 +600,7 @@ class OpenFileCtx {
       response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0,
       response = new WRITE3Response(Nfs3Status.NFS3ERR_INVAL, wccData, 0,
           WriteStableHow.UNSTABLE, Nfs3Constant.WRITE_COMMIT_VERF);
           WriteStableHow.UNSTABLE, Nfs3Constant.WRITE_COMMIT_VERF);
     } else {
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Process perfectOverWrite");
-      }
+      LOG.debug("Process perfectOverWrite");
       // TODO: let executor handle perfect overwrite
       // TODO: let executor handle perfect overwrite
       response = processPerfectOverWrite(dfsClient, offset, count, stableHow,
       response = processPerfectOverWrite(dfsClient, offset, count, stableHow,
           request.getData().array(),
           request.getData().array(),
@@ -652,17 +625,13 @@ class OpenFileCtx {
     
     
     if (writeCtx.getOffset() == nextOffset.get()) {
     if (writeCtx.getOffset() == nextOffset.get()) {
       if (!asyncStatus) {
       if (!asyncStatus) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Trigger the write back task. Current nextOffset: "
-              + nextOffset.get());
-        }
+        LOG.debug("Trigger the write back task. Current nextOffset: {}",
+            nextOffset.get());
         asyncStatus = true;
         asyncStatus = true;
         asyncWriteBackStartOffset = writeCtx.getOffset();
         asyncWriteBackStartOffset = writeCtx.getOffset();
         asyncDataService.execute(new AsyncDataService.WriteBackTask(this));
         asyncDataService.execute(new AsyncDataService.WriteBackTask(this));
       } else {
       } else {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("The write back thread is working.");
-        }
+        LOG.debug("The write back thread is working.");
       }
       }
       return true;
       return true;
     } else {
     } else {
@@ -694,15 +663,13 @@ class OpenFileCtx {
         // responses of the previous batch. So here send response immediately
         // responses of the previous batch. So here send response immediately
         // for unstable non-sequential write
         // for unstable non-sequential write
         if (stableHow != WriteStableHow.UNSTABLE) {
         if (stableHow != WriteStableHow.UNSTABLE) {
-          LOG.info("Have to change stable write to unstable write: "
-              + request.getStableHow());
+          LOG.info("Have to change stable write to unstable write: {}",
+              request.getStableHow());
           stableHow = WriteStableHow.UNSTABLE;
           stableHow = WriteStableHow.UNSTABLE;
         }
         }
 
 
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("UNSTABLE write request, send response for offset: "
-              + writeCtx.getOffset());
-        }
+        LOG.debug("UNSTABLE write request, send response for offset: {}",
+            writeCtx.getOffset());
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
             fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
             fileWcc, count, stableHow, Nfs3Constant.WRITE_COMMIT_VERF);
@@ -738,8 +705,8 @@ class OpenFileCtx {
       LOG.info("The FSDataOutputStream has been closed. "
       LOG.info("The FSDataOutputStream has been closed. "
           + "Continue processing the perfect overwrite.");
           + "Continue processing the perfect overwrite.");
     } catch (IOException e) {
     } catch (IOException e) {
-      LOG.info("hsync failed when processing possible perfect overwrite, path="
-          + path + " error: " + e);
+      LOG.info("hsync failed when processing possible perfect overwrite, " +
+              "path={} error: {}", path, e.toString());
       return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
       return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
           Nfs3Constant.WRITE_COMMIT_VERF);
           Nfs3Constant.WRITE_COMMIT_VERF);
     }
     }
@@ -748,18 +715,18 @@ class OpenFileCtx {
       fis = dfsClient.createWrappedInputStream(dfsClient.open(path));
       fis = dfsClient.createWrappedInputStream(dfsClient.open(path));
       readCount = fis.read(offset, readbuffer, 0, count);
       readCount = fis.read(offset, readbuffer, 0, count);
       if (readCount < count) {
       if (readCount < count) {
-        LOG.error("Can't read back " + count + " bytes, partial read size: "
-            + readCount);
+        LOG.error("Can't read back {} bytes, partial read size: {}",
+            count, readCount);
         return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
         return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
             Nfs3Constant.WRITE_COMMIT_VERF);
             Nfs3Constant.WRITE_COMMIT_VERF);
       }
       }
     } catch (IOException e) {
     } catch (IOException e) {
-      LOG.info("Read failed when processing possible perfect overwrite, path="
-          + path, e);
+      LOG.info("Read failed when processing possible perfect overwrite, " +
+              "path={}", path, e);
       return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
       return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
           Nfs3Constant.WRITE_COMMIT_VERF);
           Nfs3Constant.WRITE_COMMIT_VERF);
     } finally {
     } finally {
-      IOUtils.cleanup(LOG, fis);
+      IOUtils.cleanupWithLogger(LOG, fis);
     }
     }
 
 
     // Compare with the request
     // Compare with the request
@@ -776,8 +743,8 @@ class OpenFileCtx {
         dfsClient.setTimes(path, Time.monotonicNow(), -1);
         dfsClient.setTimes(path, Time.monotonicNow(), -1);
         postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
         postOpAttr = Nfs3Utils.getFileAttr(dfsClient, path, iug);
       } catch (IOException e) {
       } catch (IOException e) {
-        LOG.info("Got error when processing perfect overwrite, path=" + path
-            + " error: " + e);
+        LOG.info("Got error when processing perfect overwrite, path={} " +
+            "error: {}", path, e.toString());
         return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
         return new WRITE3Response(Nfs3Status.NFS3ERR_IO, wccData, 0, stableHow,
             Nfs3Constant.WRITE_COMMIT_VERF);
             Nfs3Constant.WRITE_COMMIT_VERF);
       }
       }
@@ -810,9 +777,7 @@ class OpenFileCtx {
 
 
     COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
     COMMIT_STATUS ret = checkCommitInternal(commitOffset, channel, xid,
         preOpAttr, fromRead);
         preOpAttr, fromRead);
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Got commit status: " + ret.name());
-    }
+    LOG.debug("Got commit status: {}", ret.name());
     // Do the sync outside the lock
     // Do the sync outside the lock
     if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
     if (ret == COMMIT_STATUS.COMMIT_DO_SYNC
         || ret == COMMIT_STATUS.COMMIT_FINISHED) {
         || ret == COMMIT_STATUS.COMMIT_FINISHED) {
@@ -828,7 +793,7 @@ class OpenFileCtx {
           ret = COMMIT_STATUS.COMMIT_ERROR;
           ret = COMMIT_STATUS.COMMIT_ERROR;
         }
         }
       } catch (IOException e) {
       } catch (IOException e) {
-        LOG.error("Got stream error during data sync: " + e);
+        LOG.error("Got stream error during data sync", e);
         // Do nothing. Stream will be closed eventually by StreamMonitor.
         // Do nothing. Stream will be closed eventually by StreamMonitor.
         // status = Nfs3Status.NFS3ERR_IO;
         // status = Nfs3Status.NFS3ERR_IO;
         ret = COMMIT_STATUS.COMMIT_ERROR;
         ret = COMMIT_STATUS.COMMIT_ERROR;
@@ -867,9 +832,7 @@ class OpenFileCtx {
       CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid, preOpAttr);
       CommitCtx commitCtx = new CommitCtx(commitOffset, channel, xid, preOpAttr);
       pendingCommits.put(commitOffset, commitCtx);
       pendingCommits.put(commitOffset, commitCtx);
     }
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("return COMMIT_SPECIAL_WAIT");
-    }
+    LOG.debug("return COMMIT_SPECIAL_WAIT");
     return COMMIT_STATUS.COMMIT_SPECIAL_WAIT;
     return COMMIT_STATUS.COMMIT_SPECIAL_WAIT;
   }
   }
   
   
@@ -886,10 +849,8 @@ class OpenFileCtx {
     }
     }
     
     
     long flushed = getFlushedOffset();
     long flushed = getFlushedOffset();
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("getFlushedOffset=" + flushed + " commitOffset=" + commitOffset
-          + "nextOffset=" + nextOffset.get());
-    }
+    LOG.debug("getFlushedOffset={} commitOffset={} nextOffset={}",
+        flushed, commitOffset, nextOffset.get());
     
     
     if (pendingWrites.isEmpty()) {
     if (pendingWrites.isEmpty()) {
       if (aixCompatMode) {
       if (aixCompatMode) {
@@ -898,10 +859,8 @@ class OpenFileCtx {
         return COMMIT_STATUS.COMMIT_FINISHED;
         return COMMIT_STATUS.COMMIT_FINISHED;
       } else {
       } else {
         if (flushed < nextOffset.get()) {
         if (flushed < nextOffset.get()) {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("get commit while still writing to the requested offset,"
-                + " with empty queue");
-          }
+          LOG.debug("get commit while still writing to the requested offset,"
+              + " with empty queue");
           return handleSpecialWait(fromRead, nextOffset.get(), channel, xid,
           return handleSpecialWait(fromRead, nextOffset.get(), channel, xid,
               preOpAttr);
               preOpAttr);
         } else {
         } else {
@@ -920,18 +879,14 @@ class OpenFileCtx {
       if (co <= flushed) {
       if (co <= flushed) {
         return COMMIT_STATUS.COMMIT_DO_SYNC;
         return COMMIT_STATUS.COMMIT_DO_SYNC;
       } else if (co < nextOffset.get()) {
       } else if (co < nextOffset.get()) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("get commit while still writing to the requested offset");
-        }
+        LOG.debug("get commit while still writing to the requested offset");
         return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
         return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
       } else {
       } else {
         // co >= nextOffset
         // co >= nextOffset
         if (checkSequential(co, nextOffset.get())) {
         if (checkSequential(co, nextOffset.get())) {
           return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
           return handleSpecialWait(fromRead, co, channel, xid, preOpAttr);
         } else {
         } else {
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("return COMMIT_SPECIAL_SUCCESS");
-          }
+          LOG.debug("return COMMIT_SPECIAL_SUCCESS");
           return COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS;
           return COMMIT_STATUS.COMMIT_SPECIAL_SUCCESS;
         }
         }
       }
       }
@@ -993,8 +948,8 @@ class OpenFileCtx {
     // Check the stream timeout
     // Check the stream timeout
     if (checkStreamTimeout(streamTimeout)) {
     if (checkStreamTimeout(streamTimeout)) {
       if (LOG.isDebugEnabled()) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("stream can be closed for fileId: "
-            + handle.dumpFileHandle());
+        LOG.debug("stream can be closed for fileId: {}",
+            handle.dumpFileHandle());
       }
       }
       flag = true;
       flag = true;
     }
     }
@@ -1009,10 +964,8 @@ class OpenFileCtx {
    */
    */
   private synchronized WriteCtx offerNextToWrite() {
   private synchronized WriteCtx offerNextToWrite() {
     if (pendingWrites.isEmpty()) {
     if (pendingWrites.isEmpty()) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("The async write task has no pending writes, fileId: "
-            + latestAttr.getFileId());
-      }
+      LOG.debug("The async write task has no pending writes, fileId: {}",
+          latestAttr.getFileId());
       // process pending commit again to handle this race: a commit is added
       // process pending commit again to handle this race: a commit is added
       // to pendingCommits map just after the last doSingleWrite returns.
       // to pendingCommits map just after the last doSingleWrite returns.
       // There is no pending write and the commit should be handled by the
       // There is no pending write and the commit should be handled by the
@@ -1029,49 +982,35 @@ class OpenFileCtx {
     OffsetRange range = lastEntry.getKey();
     OffsetRange range = lastEntry.getKey();
     WriteCtx toWrite = lastEntry.getValue();
     WriteCtx toWrite = lastEntry.getValue();
 
 
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("range.getMin()=" + range.getMin() + " nextOffset="
-          + nextOffset);
-    }
+    LOG.trace("range.getMin()={} nextOffset={}",
+        range.getMin(), nextOffset);
 
 
     long offset = nextOffset.get();
     long offset = nextOffset.get();
     if (range.getMin() > offset) {
     if (range.getMin() > offset) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("The next sequential write has not arrived yet");
-      }
+      LOG.debug("The next sequential write has not arrived yet");
       processCommits(nextOffset.get()); // handle race
       processCommits(nextOffset.get()); // handle race
       this.asyncStatus = false;
       this.asyncStatus = false;
     } else if (range.getMax() <= offset) {
     } else if (range.getMax() <= offset) {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Remove write " + range.toString()
-            + " which is already written from the list");
-      }
+      LOG.debug("Remove write {} which is already written from the list",
+          range);
       // remove the WriteCtx from cache
       // remove the WriteCtx from cache
       pendingWrites.remove(range);
       pendingWrites.remove(range);
     } else if (range.getMin() < offset && range.getMax() > offset) {
     } else if (range.getMin() < offset && range.getMax() > offset) {
-      LOG.warn("Got an overlapping write " + range.toString()
-          + ", nextOffset=" + offset
-          + ". Remove and trim it");
+      LOG.warn("Got an overlapping write {}, nextOffset={}. " +
+          "Remove and trim it", range, offset);
       pendingWrites.remove(range);
       pendingWrites.remove(range);
       trimWriteRequest(toWrite, offset);
       trimWriteRequest(toWrite, offset);
       // update nextOffset
       // update nextOffset
       nextOffset.addAndGet(toWrite.getCount());
       nextOffset.addAndGet(toWrite.getCount());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Change nextOffset (after trim) to " + nextOffset.get());
-      }
+      LOG.debug("Change nextOffset (after trim) to {}", nextOffset.get());
       return toWrite;
       return toWrite;
     } else {
     } else {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Remove write " + range.toString()
-            + " from the list");
-      }
+      LOG.debug("Remove write {} from the list", range);
       // after writing, remove the WriteCtx from cache
       // after writing, remove the WriteCtx from cache
       pendingWrites.remove(range);
       pendingWrites.remove(range);
       // update nextOffset
       // update nextOffset
       nextOffset.addAndGet(toWrite.getCount());
       nextOffset.addAndGet(toWrite.getCount());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Change nextOffset to " + nextOffset.get());
-      }
+      LOG.debug("Change nextOffset to {}", nextOffset.get());
       return toWrite;
       return toWrite;
     }
     }
     return null;
     return null;
@@ -1095,9 +1034,9 @@ class OpenFileCtx {
         }
         }
       }
       }
       
       
-      if (!activeState && LOG.isDebugEnabled()) {
-        LOG.debug("The openFileCtx is not active anymore, fileId: "
-            + latestAttr.getFileId());
+      if (!activeState) {
+        LOG.debug("The openFileCtx is not active anymore, fileId: {}",
+            latestAttr.getFileId());
       }
       }
     } finally {
     } finally {
       // Make sure to reset asyncStatus to false unless a race happens
       // Make sure to reset asyncStatus to false unless a race happens
@@ -1105,11 +1044,12 @@ class OpenFileCtx {
         if (startOffset == asyncWriteBackStartOffset) {
         if (startOffset == asyncWriteBackStartOffset) {
           asyncStatus = false;
           asyncStatus = false;
         } else {
         } else {
-          LOG.info("Another async task is already started before this one"
-              + " is finalized. fileId: " + latestAttr.getFileId()
-              + " asyncStatus: " + asyncStatus + " original startOffset: "
-              + startOffset + " new startOffset: " + asyncWriteBackStartOffset
-              + ". Won't change asyncStatus here.");
+          LOG.info("Another async task is already started before this one " +
+                  "is finalized. fileId: {} asyncStatus: {} " +
+                  "original startOffset: {} " +
+                  "new startOffset: {}. Won't change asyncStatus here.",
+              latestAttr.getFileId(), asyncStatus,
+              startOffset, asyncWriteBackStartOffset);
         }
         }
       }
       }
     }
     }
@@ -1132,8 +1072,8 @@ class OpenFileCtx {
       status = Nfs3Status.NFS3_OK;
       status = Nfs3Status.NFS3_OK;
     } catch (ClosedChannelException cce) {
     } catch (ClosedChannelException cce) {
       if (!pendingWrites.isEmpty()) {
       if (!pendingWrites.isEmpty()) {
-        LOG.error("Can't sync for fileId: " + latestAttr.getFileId()
-            + ". Channel closed with writes pending.", cce);
+        LOG.error("Can't sync for fileId: {}. " +
+            "Channel closed with writes pending", latestAttr.getFileId(), cce);
       }
       }
       status = Nfs3Status.NFS3ERR_IO;
       status = Nfs3Status.NFS3ERR_IO;
     } catch (IOException e) {
     } catch (IOException e) {
@@ -1152,8 +1092,8 @@ class OpenFileCtx {
     }
     }
 
 
     if (latestAttr.getSize() != offset) {
     if (latestAttr.getSize() != offset) {
-      LOG.error("After sync, the expect file size: " + offset
-          + ", however actual file size is: " + latestAttr.getSize());
+      LOG.error("After sync, the expect file size: {}, " +
+          "however actual file size is: {}", offset, latestAttr.getSize());
       status = Nfs3Status.NFS3ERR_IO;
       status = Nfs3Status.NFS3ERR_IO;
     }
     }
     WccData wccData = new WccData(Nfs3Utils.getWccAttr(latestAttr), latestAttr);
     WccData wccData = new WccData(Nfs3Utils.getWccAttr(latestAttr), latestAttr);
@@ -1170,11 +1110,11 @@ class OpenFileCtx {
       Nfs3Utils.writeChannelCommit(commit.getChannel(), response
       Nfs3Utils.writeChannelCommit(commit.getChannel(), response
           .serialize(new XDR(), commit.getXid(),
           .serialize(new XDR(), commit.getXid(),
               new VerifierNone()), commit.getXid());
               new VerifierNone()), commit.getXid());
-      
+
       if (LOG.isDebugEnabled()) {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("FileId: " + latestAttr.getFileId() + " Service time: "
-            + Nfs3Utils.getElapsedTime(commit.startTime)
-            + "ns. Sent response for commit: " + commit);
+        LOG.debug("FileId: {} Service time: {}ns. " +
+                "Sent response for commit: {}", latestAttr.getFileId(),
+            Nfs3Utils.getElapsedTime(commit.startTime), commit);
       }
       }
       entry = pendingCommits.firstEntry();
       entry = pendingCommits.firstEntry();
     }
     }
@@ -1190,8 +1130,8 @@ class OpenFileCtx {
     
     
     FileHandle handle = writeCtx.getHandle();
     FileHandle handle = writeCtx.getHandle();
     if (LOG.isDebugEnabled()) {
     if (LOG.isDebugEnabled()) {
-      LOG.debug("do write, fileHandle " + handle.dumpFileHandle() + " offset: "
-          + offset + " length: " + count + " stableHow: " + stableHow.name());
+      LOG.debug("do write, fileHandle {} offset: {} length: {} stableHow: {}",
+          handle.dumpFileHandle(), offset, count, stableHow.name());
     }
     }
 
 
     try {
     try {
@@ -1215,10 +1155,10 @@ class OpenFileCtx {
             writeCtx.setDataState(WriteCtx.DataState.NO_DUMP);
             writeCtx.setDataState(WriteCtx.DataState.NO_DUMP);
             updateNonSequentialWriteInMemory(-count);
             updateNonSequentialWriteInMemory(-count);
             if (LOG.isDebugEnabled()) {
             if (LOG.isDebugEnabled()) {
-              LOG.debug("After writing " + handle.dumpFileHandle()
-                  + " at offset " + offset
-                  + ", updated the memory count, new value: "
-                  + nonSequentialWriteInMemory.get());
+              LOG.debug("After writing {} at offset {}, " +
+                      "updated the memory count, new value: {}",
+                  handle.dumpFileHandle(), offset,
+                  nonSequentialWriteInMemory.get());
             }
             }
           }
           }
         }
         }
@@ -1226,7 +1166,7 @@ class OpenFileCtx {
       
       
       if (!writeCtx.getReplied()) {
       if (!writeCtx.getReplied()) {
         if (stableHow != WriteStableHow.UNSTABLE) {
         if (stableHow != WriteStableHow.UNSTABLE) {
-          LOG.info("Do sync for stable write: " + writeCtx);
+          LOG.info("Do sync for stable write: {}", writeCtx);
           try {
           try {
             if (stableHow == WriteStableHow.DATA_SYNC) {
             if (stableHow == WriteStableHow.DATA_SYNC) {
               fos.hsync();
               fos.hsync();
@@ -1237,7 +1177,7 @@ class OpenFileCtx {
               fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
               fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
             }
             }
           } catch (IOException e) {
           } catch (IOException e) {
-            LOG.error("hsync failed with writeCtx: " + writeCtx, e);
+            LOG.error("hsync failed with writeCtx: {}", writeCtx, e);
             throw e;
             throw e;
           }
           }
         }
         }
@@ -1245,8 +1185,8 @@ class OpenFileCtx {
         WccAttr preOpAttr = latestAttr.getWccAttr();
         WccAttr preOpAttr = latestAttr.getWccAttr();
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         WccData fileWcc = new WccData(preOpAttr, latestAttr);
         if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {
         if (writeCtx.getOriginalCount() != WriteCtx.INVALID_ORIGINAL_COUNT) {
-          LOG.warn("Return original count: " + writeCtx.getOriginalCount()
-              + " instead of real data count: " + count);
+          LOG.warn("Return original count: {} instead of real data count: {}",
+              writeCtx.getOriginalCount(), count);
           count = writeCtx.getOriginalCount();
           count = writeCtx.getOriginalCount();
         }
         }
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK,
@@ -1260,8 +1200,8 @@ class OpenFileCtx {
       processCommits(writeCtx.getOffset() + writeCtx.getCount());
       processCommits(writeCtx.getOffset() + writeCtx.getCount());
      
      
     } catch (IOException e) {
     } catch (IOException e) {
-      LOG.error("Error writing to fileHandle " + handle.dumpFileHandle()
-          + " at offset " + offset + " and length " + count, e);
+      LOG.error("Error writing to fileHandle {} at offset {} and length {}",
+          handle.dumpFileHandle(), offset, count, e);
       if (!writeCtx.getReplied()) {
       if (!writeCtx.getReplied()) {
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
         WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO);
         Nfs3Utils.writeChannel(channel, response.serialize(
         Nfs3Utils.writeChannel(channel, response.serialize(
@@ -1269,8 +1209,8 @@ class OpenFileCtx {
         // Keep stream open. Either client retries or SteamMonitor closes it.
         // Keep stream open. Either client retries or SteamMonitor closes it.
       }
       }
 
 
-      LOG.info("Clean up open file context for fileId: "
-          + latestAttr.getFileId());
+      LOG.info("Clean up open file context for fileId: {}",
+          latestAttr.getFileId());
       cleanup();
       cleanup();
     }
     }
   }
   }
@@ -1297,17 +1237,16 @@ class OpenFileCtx {
         fos.close();
         fos.close();
       }
       }
     } catch (IOException e) {
     } catch (IOException e) {
-      LOG.info("Can't close stream for fileId: " + latestAttr.getFileId()
-          + ", error: " + e);
+      LOG.info("Can't close stream for fileId: {}, error: {}",
+          latestAttr.getFileId(), e.toString());
     }
     }
     
     
     // Reply error for pending writes
     // Reply error for pending writes
-    LOG.info("There are " + pendingWrites.size() + " pending writes.");
+    LOG.info("There are {} pending writes.", pendingWrites.size());
     WccAttr preOpAttr = latestAttr.getWccAttr();
     WccAttr preOpAttr = latestAttr.getWccAttr();
     while (!pendingWrites.isEmpty()) {
     while (!pendingWrites.isEmpty()) {
       OffsetRange key = pendingWrites.firstKey();
       OffsetRange key = pendingWrites.firstKey();
-      LOG.info("Fail pending write: " + key.toString()
-          + ", nextOffset=" + nextOffset.get());
+      LOG.info("Fail pending write: {}, nextOffset={}", key, nextOffset.get());
       
       
       WriteCtx writeCtx = pendingWrites.remove(key);
       WriteCtx writeCtx = pendingWrites.remove(key);
       if (!writeCtx.getReplied()) {
       if (!writeCtx.getReplied()) {
@@ -1325,11 +1264,12 @@ class OpenFileCtx {
       try {
       try {
         dumpOut.close();
         dumpOut.close();
       } catch (IOException e) {
       } catch (IOException e) {
-        LOG.error("Failed to close outputstream of dump file" + dumpFilePath, e);
+        LOG.error("Failed to close outputstream of dump file {}",
+            dumpFilePath, e);
       }
       }
       File dumpFile = new File(dumpFilePath);
       File dumpFile = new File(dumpFilePath);
       if (dumpFile.exists() && !dumpFile.delete()) {
       if (dumpFile.exists() && !dumpFile.delete()) {
-        LOG.error("Failed to delete dumpfile: " + dumpFile);
+        LOG.error("Failed to delete dumpfile: {}", dumpFile);
       }
       }
     }
     }
     if (raf != null) {
     if (raf != null) {

File diff is too large to display
+ 154 - 187
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java


+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-    <version>3.1.0-SNAPSHOT</version>
+    <version>3.2.0-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
  </parent>
  <artifactId>hadoop-hdfs</artifactId>
-  <version>3.1.0-SNAPSHOT</version>
+  <version>3.2.0-SNAPSHOT</version>
  <description>Apache Hadoop HDFS</description>
  <name>Apache Hadoop HDFS</name>
  <packaging>jar</packaging>

+ 7 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -34,8 +34,8 @@ import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformance
 import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver;
 import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver;
-import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl;
 import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl;
 import org.apache.hadoop.http.HttpConfig;

 /** 
@@ -414,6 +414,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.snapshotdiff.listing.limit";
       "dfs.namenode.snapshotdiff.listing.limit";
   public static final int
   public static final int
       DFS_NAMENODE_SNAPSHOT_DIFF_LISTING_LIMIT_DEFAULT = 1000;
       DFS_NAMENODE_SNAPSHOT_DIFF_LISTING_LIMIT_DEFAULT = 1000;
+
+  public static final String DFS_NAMENODE_SNAPSHOT_MAX_LIMIT =
+      "dfs.namenode.snapshot.max.limit";
+
+  public static final int DFS_NAMENODE_SNAPSHOT_MAX_LIMIT_DEFAULT = 65536;
   // Whether to enable datanode's stale state detection and usage for reads
   // Whether to enable datanode's stale state detection and usage for reads
   public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode";
   public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode";
   public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false;
   public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false;
@@ -1270,7 +1275,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String FEDERATION_STORE_DRIVER_CLASS =
   public static final String FEDERATION_STORE_DRIVER_CLASS =
       FEDERATION_STORE_PREFIX + "driver.class";
       FEDERATION_STORE_PREFIX + "driver.class";
   public static final Class<? extends StateStoreDriver>
   public static final Class<? extends StateStoreDriver>
-      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class;
+      FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreZooKeeperImpl.class;
 
 
   public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
   public static final String FEDERATION_STORE_CONNECTION_TEST_MS =
       FEDERATION_STORE_PREFIX + "connection.test";
       FEDERATION_STORE_PREFIX + "connection.test";
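The new dfs.namenode.snapshot.max.limit key above (default 65536) limits, per its name and default, how many snapshots a snapshottable directory may hold. A hedged sketch of reading or overriding the key through a plain Configuration; only the key name and default come from the patch, and a real deployment would set this in hdfs-site.xml rather than in code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

// Illustrative only: the class name, override value, and printout are made up.
public class SnapshotLimitConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Set here only to keep the sketch self-contained.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT, 32768);

    int maxSnapshots = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT,
        DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_MAX_LIMIT_DEFAULT);
    System.out.println("Snapshot limit in effect: " + maxSnapshots);
  }
}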

+ 8 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java

@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
+import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
@@ -325,6 +326,7 @@ public class HAUtil {
    */
   public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
       throws IOException {
+    List<IOException> exceptions = new ArrayList<>();
     for (ClientProtocol namenode : namenodes) {
       try {
         namenode.getFileInfo("/");
@@ -334,10 +336,15 @@
         if (cause instanceof StandbyException) {
           // This is expected to happen for a standby NN.
         } else {
-          throw re;
+          exceptions.add(re);
         }
+      } catch (IOException ioe) {
+        exceptions.add(ioe);
       }
     }
+    if(!exceptions.isEmpty()){
+      throw MultipleIOException.createIOException(exceptions);
+    }
     return false;
   }
 }
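The HAUtil change above stops failing fast on the first bad NameNode: it collects every IOException raised by the probes and surfaces them together through MultipleIOException. A small standalone sketch of that collect-then-throw pattern, with a made-up probe standing in for ClientProtocol#getFileInfo("/"):

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.io.MultipleIOException;

// The target names and the probe's failure rule are fabricated for the sketch.
public class AggregateFailuresSketch {
  public static void main(String[] args) {
    List<IOException> exceptions = new ArrayList<>();
    for (String target : Arrays.asList("nn1", "nn2", "nn3")) {
      try {
        probe(target);
      } catch (IOException ioe) {
        exceptions.add(ioe);  // remember the failure, keep probing
      }
    }
    if (!exceptions.isEmpty()) {
      // createIOException bundles several causes into one MultipleIOException
      // (or returns the single cause when only one was collected).
      IOException combined = MultipleIOException.createIOException(exceptions);
      System.err.println("Probes failed: " + combined.getMessage());
    }
  }

  private static void probe(String target) throws IOException {
    if (!"nn2".equals(target)) {  // simulated: only nn2 responds
      throw new IOException(target + " is unreachable");
    }
  }
}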

+ 143 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/BlackListBasedTrustedChannelResolver.java

@@ -0,0 +1,143 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.CombinedIPList;
+
+/**
+ * Implements {@link TrustedChannelResolver}
+ * to trust ips/host/subnets based on a blackList.
+ */
+public class BlackListBasedTrustedChannelResolver extends
+    TrustedChannelResolver {
+
+  private CombinedIPList blackListForServer;
+  private CombinedIPList blackListForClient;
+
+  private static final String FIXED_BLACK_LIST_DEFAULT_LOCATION = "/etc/hadoop"
+      + "/fixedBlackList";
+
+  private static final String VARIABLE_BLACK_LIST_DEFAULT_LOCATION = "/etc/"
+      + "hadoop/blackList";
+
+  /**
+   * Path to the file containing subnets and ip addresses to form
+   * fixed BlackList. Server side config.
+   */
+  public static final String DFS_DATATRANSFER_SERVER_FIXED_BLACK_LIST_FILE =
+      "dfs.datatransfer.server.fixedBlackList.file";
+  /**
+   * Enables/Disables variable BlackList. Server side config.
+   */
+  public static final String DFS_DATATRANSFER_SERVER_VARIABLE_BLACK_LIST_ENABLE
+      = "dfs.datatransfer.server.variableBlackList.enable";
+  /**
+   * Path to the file containing subnets and ip addresses to form
+   * variable BlackList. Server side config.
+   */
+  public static final String DFS_DATATRANSFER_SERVER_VARIABLE_BLACK_LIST_FILE =
+      "dfs.datatransfer.server.variableBlackList.file";
+  /**
+   * Time in seconds after which the variable BlackList file is checked for
+   * updates. Server side config.
+   */
+  public static final String
+      DFS_DATATRANSFER_SERVER_VARIABLE_BLACK_LIST_CACHE_SECS = "dfs."
+      + "datatransfer.server.variableBlackList.cache.secs";
+
+  /**
+   * Path to the file containing subnets and ip addresses to
+   * form fixed BlackList. This key is for client.
+   */
+  public static final String DFS_DATATRANSFER_CLIENT_FIXED_BLACK_LIST_FILE =
+      "dfs.datatransfer.client.fixedBlackList.file";
+  /**
+   * Enables/Disables variable BlackList. This key is for client.
+   */
+  public static final String DFS_DATATRANSFER_CLIENT_VARIABLE_BLACK_LIST_ENABLE
+      = "dfs.datatransfer.client.variableBlackList.enable";
+  /**
+   * Path to the file to containing subnets and ip addresses to form variable
+   * BlackList. This key is for client.
+   */
+  public static final String DFS_DATATRANSFER_CLIENT_VARIABLE_BLACK_LIST_FILE =
+      "dfs.datatransfer.client.variableBlackList.file";
+  /**
+   * Time in seconds after which the variable BlackList file is
+   * checked for updates. This key is for client.
+   */
+  public static final String
+      DFS_DATATRANSFER_CLIENT_VARIABLE_BLACK_LIST_CACHE_SECS =
+      "dfs.datatransfer.client.variableBlackList.cache.secs";
+
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    String fixedFile = conf.get(DFS_DATATRANSFER_SERVER_FIXED_BLACK_LIST_FILE,
+        FIXED_BLACK_LIST_DEFAULT_LOCATION);
+    String variableFile = null;
+    long expiryTime = 0;
+
+    if (conf
+        .getBoolean(DFS_DATATRANSFER_SERVER_VARIABLE_BLACK_LIST_ENABLE,
+            false)) {
+      variableFile = conf.get(DFS_DATATRANSFER_SERVER_VARIABLE_BLACK_LIST_FILE,
+          VARIABLE_BLACK_LIST_DEFAULT_LOCATION);
+      expiryTime =
+          conf.getLong(DFS_DATATRANSFER_SERVER_VARIABLE_BLACK_LIST_CACHE_SECS,
+              3600) * 1000;
+    }
+
+    blackListForServer = new CombinedIPList(fixedFile, variableFile,
+        expiryTime);
+
+    fixedFile = conf
+        .get(DFS_DATATRANSFER_CLIENT_FIXED_BLACK_LIST_FILE, fixedFile);
+    expiryTime = 0;
+
+    if (conf
+        .getBoolean(DFS_DATATRANSFER_CLIENT_VARIABLE_BLACK_LIST_ENABLE,
+            false)) {
+      variableFile = conf
+          .get(DFS_DATATRANSFER_CLIENT_VARIABLE_BLACK_LIST_FILE, variableFile);
+      expiryTime =
+          conf.getLong(DFS_DATATRANSFER_CLIENT_VARIABLE_BLACK_LIST_CACHE_SECS,
+              3600) * 1000;
+    }
+
+    blackListForClient = new CombinedIPList(fixedFile, variableFile,
+        expiryTime);
+  }
+
+  public boolean isTrusted() {
+    try {
+      return !blackListForClient
+          .isIn(InetAddress.getLocalHost().getHostAddress());
+    } catch (UnknownHostException e) {
+      return true;
+    }
+  }
+
+  public boolean isTrusted(InetAddress clientAddress) {
+    return !blackListForServer.isIn(clientAddress.getHostAddress());
+  }
+}
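A hedged sketch of exercising the new BlackListBasedTrustedChannelResolver directly. The config keys are the constants declared above; the blacklist file path and client IP are placeholders, and in a real cluster the resolver would be selected through the TrustedChannelResolver plugin setting rather than constructed by hand:

import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlackListBasedTrustedChannelResolver;

public class BlackListResolverSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(
        BlackListBasedTrustedChannelResolver
            .DFS_DATATRANSFER_SERVER_FIXED_BLACK_LIST_FILE,
        "/etc/hadoop/fixedBlackList");                 // placeholder path
    conf.setBoolean(
        BlackListBasedTrustedChannelResolver
            .DFS_DATATRANSFER_SERVER_VARIABLE_BLACK_LIST_ENABLE,
        false);

    BlackListBasedTrustedChannelResolver resolver =
        new BlackListBasedTrustedChannelResolver();
    resolver.setConf(conf);  // builds the fixed/variable blacklists

    InetAddress client = InetAddress.getByName("192.0.2.10");  // example IP
    // A peer is trusted unless its address appears on the blacklist; an
    // untrusted channel would typically have its transfers encrypted.
    System.out.println("trusted: " + resolver.isTrusted(client));
  }
}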

+ 24 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/package-info.java

@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceStability.Evolving
+package org.apache.hadoop.hdfs.protocol.datatransfer;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This package contains classes related to hdfs data transfer protocol.
+ */

+ 60 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolServerSideTranslatorPB.java

@@ -23,8 +23,14 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
@@ -32,16 +38,28 @@ import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProt
 import org.apache.hadoop.hdfs.server.federation.router.RouterAdminServer;
 import org.apache.hadoop.hdfs.server.federation.router.RouterAdminServer;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnterSafeModeRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnterSafeModeResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafeModeRequestPBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafeModeResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
@@ -148,4 +166,46 @@ public class RouterAdminProtocolServerSideTranslatorPB implements
       throw new ServiceException(e);
       throw new ServiceException(e);
     }
     }
   }
   }
+
+  @Override
+  public EnterSafeModeResponseProto enterSafeMode(RpcController controller,
+      EnterSafeModeRequestProto request) throws ServiceException {
+    try {
+      EnterSafeModeRequest req = new EnterSafeModeRequestPBImpl(request);
+      EnterSafeModeResponse response = server.enterSafeMode(req);
+      EnterSafeModeResponsePBImpl responsePB =
+          (EnterSafeModeResponsePBImpl) response;
+      return responsePB.getProto();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public LeaveSafeModeResponseProto leaveSafeMode(RpcController controller,
+      LeaveSafeModeRequestProto request) throws ServiceException {
+    try {
+      LeaveSafeModeRequest req = new LeaveSafeModeRequestPBImpl(request);
+      LeaveSafeModeResponse response = server.leaveSafeMode(req);
+      LeaveSafeModeResponsePBImpl responsePB =
+          (LeaveSafeModeResponsePBImpl) response;
+      return responsePB.getProto();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public GetSafeModeResponseProto getSafeMode(RpcController controller,
+      GetSafeModeRequestProto request) throws ServiceException {
+    try {
+      GetSafeModeRequest req = new GetSafeModeRequestPBImpl(request);
+      GetSafeModeResponse response = server.getSafeMode(req);
+      GetSafeModeResponsePBImpl responsePB =
+          (GetSafeModeResponsePBImpl) response;
+      return responsePB.getProto();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
 }
 }

+ 59 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RouterAdminProtocolTranslatorPB.java

@@ -24,25 +24,41 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.AddMountTableEntryResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.EnterSafeModeResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetMountTableEntriesResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.GetSafeModeResponseProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeRequestProto;
+import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.LeaveSafeModeResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryRequestProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.UpdateMountTableEntryResponseProto;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
 import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.router.RouterStateManager;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.EnterSafeModeResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetSafeModeResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.LeaveSafeModeResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryResponse;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.AddMountTableEntryResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.EnterSafeModeResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetMountTableEntriesResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.GetSafeModeResponsePBImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.LeaveSafeModeResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryResponsePBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
 import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.UpdateMountTableEntryRequestPBImpl;
@@ -64,7 +80,7 @@ import com.google.protobuf.ServiceException;
 @InterfaceStability.Stable
 @InterfaceStability.Stable
 public class RouterAdminProtocolTranslatorPB
 public class RouterAdminProtocolTranslatorPB
     implements ProtocolMetaInterface, MountTableManager,
     implements ProtocolMetaInterface, MountTableManager,
-    Closeable, ProtocolTranslator {
+    Closeable, ProtocolTranslator, RouterStateManager {
   final private RouterAdminProtocolPB rpcProxy;
   final private RouterAdminProtocolPB rpcProxy;
 
 
   public RouterAdminProtocolTranslatorPB(RouterAdminProtocolPB proxy) {
   public RouterAdminProtocolTranslatorPB(RouterAdminProtocolPB proxy) {
@@ -147,4 +163,46 @@ public class RouterAdminProtocolTranslatorPB
       throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
       throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
     }
     }
   }
   }
+
+  @Override
+  public EnterSafeModeResponse enterSafeMode(EnterSafeModeRequest request)
+      throws IOException {
+    EnterSafeModeRequestProto proto =
+        EnterSafeModeRequestProto.newBuilder().build();
+    try {
+      EnterSafeModeResponseProto response =
+          rpcProxy.enterSafeMode(null, proto);
+      return new EnterSafeModeResponsePBImpl(response);
+    } catch (ServiceException e) {
+      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
+    }
+  }
+
+  @Override
+  public LeaveSafeModeResponse leaveSafeMode(LeaveSafeModeRequest request)
+      throws IOException {
+    LeaveSafeModeRequestProto proto =
+        LeaveSafeModeRequestProto.newBuilder().build();
+    try {
+      LeaveSafeModeResponseProto response =
+          rpcProxy.leaveSafeMode(null, proto);
+      return new LeaveSafeModeResponsePBImpl(response);
+    } catch (ServiceException e) {
+      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
+    }
+  }
+
+  @Override
+  public GetSafeModeResponse getSafeMode(GetSafeModeRequest request)
+      throws IOException {
+    GetSafeModeRequestProto proto =
+        GetSafeModeRequestProto.newBuilder().build();
+    try {
+      GetSafeModeResponseProto response =
+          rpcProxy.getSafeMode(null, proto);
+      return new GetSafeModeResponsePBImpl(response);
+    } catch (ServiceException e) {
+      throw new IOException(ProtobufHelper.getRemoteException(e).getMessage());
+    }
+  }
 }
 }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/InterQJournalProtocol.java

@@ -21,7 +21,7 @@ package org.apache.hadoop.hdfs.qjournal.protocol;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.qjournal.server.JournalNode;
-import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocolProtos.GetEditLogManifestFromJournalResponseProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto;
 import org.apache.hadoop.security.KerberosInfo;
 
 import java.io.IOException;
@@ -47,7 +47,7 @@ public interface InterQJournalProtocol {
    *        segment
    * @return a list of edit log segments since the given transaction ID.
    */
-  GetEditLogManifestFromJournalResponseProto getEditLogManifestFromJournal(
+  GetEditLogManifestResponseProto getEditLogManifestFromJournal(
       String jid, String nameServiceId, long sinceTxId, boolean inProgressOk)
       throws IOException;
 

+ 5 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/InterQJournalProtocolServerSideTranslatorPB.java

@@ -24,8 +24,8 @@ import com.google.protobuf.ServiceException;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocol;
-import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocolProtos.GetEditLogManifestFromJournalRequestProto;
-import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocolProtos.GetEditLogManifestFromJournalResponseProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto;
 
 import java.io.IOException;
 
@@ -47,10 +47,9 @@ public class InterQJournalProtocolServerSideTranslatorPB implements
   }
 
   @Override
-  public GetEditLogManifestFromJournalResponseProto
-      getEditLogManifestFromJournal(RpcController controller,
-                                    GetEditLogManifestFromJournalRequestProto
-                                        request) throws ServiceException {
+  public GetEditLogManifestResponseProto getEditLogManifestFromJournal(
+      RpcController controller, GetEditLogManifestRequestProto request)
+      throws ServiceException {
     try {
       return impl.getEditLogManifestFromJournal(
           request.getJid().getIdentifier(),

+ 6 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/InterQJournalProtocolTranslatorPB.java

@@ -24,8 +24,8 @@ import com.google.protobuf.ServiceException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocol;
-import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocolProtos.GetEditLogManifestFromJournalResponseProto;
-import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocolProtos.GetEditLogManifestFromJournalRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
@@ -60,13 +60,12 @@ public class InterQJournalProtocolTranslatorPB implements ProtocolMetaInterface,
 
 
   @Override
-  public GetEditLogManifestFromJournalResponseProto
-      getEditLogManifestFromJournal(String jid, String nameServiceId,
-                                    long sinceTxId, boolean inProgressOk)
+  public GetEditLogManifestResponseProto getEditLogManifestFromJournal(
+      String jid, String nameServiceId, long sinceTxId, boolean inProgressOk)
       throws IOException {
     try {
-      GetEditLogManifestFromJournalRequestProto.Builder req;
-      req = GetEditLogManifestFromJournalRequestProto.newBuilder()
+      GetEditLogManifestRequestProto.Builder req;
+      req = GetEditLogManifestRequestProto.newBuilder()
           .setJid(convertJournalId(jid))
           .setSinceTxId(sinceTxId)
          .setInProgressOk(inProgressOk);

+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java

@@ -193,10 +193,9 @@ class JNStorage extends Storage {
           // /\d+/ in the regex itself.
           long txid = Long.parseLong(matcher.group(1));
           if (txid < minTxIdToKeep) {
-            LOG.info("Purging no-longer needed file " + txid);
+            LOG.info("Purging no-longer needed file {}", txid);
             if (!f.delete()) {
-              LOG.warn("Unable to delete no-longer-needed data " +
-                  f);
+              LOG.warn("Unable to delete no-longer-needed data {}", f);
             }
             break;
           }
@@ -214,7 +213,7 @@ class JNStorage extends Storage {
     }
     setStorageInfo(nsInfo);
 
-    LOG.info("Formatting journal " + sd + " with nsid: " + getNamespaceID());
+    LOG.info("Formatting journal {} with nsid: {}", sd, getNamespaceID());
     // Unlock the directory before formatting, because we will
     // re-analyze it after format(). The analyzeStorage() call
     // below is reponsible for re-locking it. This is a no-op
@@ -278,7 +277,7 @@ class JNStorage extends Storage {
   }
 
   public void close() throws IOException {
-    LOG.info("Closing journal storage for " + sd);
+    LOG.info("Closing journal storage for {}", sd);
     unlockAll();
   }
 

+ 87 - 42
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java

@@ -17,18 +17,10 @@
  */
  */
 package org.apache.hadoop.hdfs.qjournal.server;
 package org.apache.hadoop.hdfs.qjournal.server;
 
 
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
-import java.io.File;
-import java.io.FileFilter;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.HashMap;
-import java.util.Map;
-
-import javax.management.ObjectName;
-
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -47,14 +39,22 @@ import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker;
+import static org.apache.hadoop.util.ExitUtil.terminate;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.ToolRunner;
 import org.apache.htrace.core.Tracer;
 import org.apache.htrace.core.Tracer;
 import org.eclipse.jetty.util.ajax.JSON;
 import org.eclipse.jetty.util.ajax.JSON;
 
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Maps;
+import javax.management.ObjectName;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
 
 
 /**
 /**
  * The JournalNode is a daemon which allows namenodes using
  * The JournalNode is a daemon which allows namenodes using
@@ -74,7 +74,7 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
       .newHashMap();
       .newHashMap();
   private ObjectName journalNodeInfoBeanName;
   private ObjectName journalNodeInfoBeanName;
   private String httpServerURI;
   private String httpServerURI;
-  private File localDir;
+  private final ArrayList<File> localDir = Lists.newArrayList();
   Tracer tracer;
   Tracer tracer;
 
 
   static {
   static {
@@ -94,11 +94,10 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
     
     
     Journal journal = journalsById.get(jid);
     Journal journal = journalsById.get(jid);
     if (journal == null) {
     if (journal == null) {
-      File logDir = getLogDir(jid);
-      LOG.info("Initializing journal in directory " + logDir);      
+      File logDir = getLogDir(jid, nameServiceId);
+      LOG.info("Initializing journal in directory " + logDir);
       journal = new Journal(conf, logDir, jid, startOpt, new ErrorReporter());
       journal = new Journal(conf, logDir, jid, startOpt, new ErrorReporter());
       journalsById.put(jid, journal);
       journalsById.put(jid, journal);
-
       // Start SyncJouranl thread, if JournalNode Sync is enabled
       // Start SyncJouranl thread, if JournalNode Sync is enabled
       if (conf.getBoolean(
       if (conf.getBoolean(
           DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY,
           DFSConfigKeys.DFS_JOURNALNODE_ENABLE_SYNC_KEY,
@@ -148,9 +147,34 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   @Override
   @Override
   public void setConf(Configuration conf) {
   public void setConf(Configuration conf) {
     this.conf = conf;
     this.conf = conf;
-    this.localDir = new File(
-        conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
-        DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_DEFAULT).trim());
+
+    String journalNodeDir = null;
+    Collection<String> nameserviceIds;
+
+    nameserviceIds = conf.getTrimmedStringCollection(
+        DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);
+
+    if (nameserviceIds.size() == 0) {
+      nameserviceIds = conf.getTrimmedStringCollection(
+          DFSConfigKeys.DFS_NAMESERVICES);
+    }
+
+    //if nameservicesIds size is less than 2, it means it is not a federated
+    // setup
+    if (nameserviceIds.size() < 2) {
+      // Check in HA, if journal edit dir is set by appending with
+      // nameserviceId
+      for (String nameService : nameserviceIds) {
+        journalNodeDir = conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY +
+        "." + nameService);
+      }
+      if (journalNodeDir == null) {
+        journalNodeDir = conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
+            DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_DEFAULT);
+      }
+      localDir.add(new File(journalNodeDir.trim()));
+    }
+
     if (this.tracer == null) {
     if (this.tracer == null) {
       this.tracer = new Tracer.Builder("JournalNode").
       this.tracer = new Tracer.Builder("JournalNode").
           conf(TraceUtils.wrapHadoopConf("journalnode.htrace", conf)).
           conf(TraceUtils.wrapHadoopConf("journalnode.htrace", conf)).
@@ -158,12 +182,13 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
     }
     }
   }
   }
 
 
-  private static void validateAndCreateJournalDir(File dir) throws IOException {
+  private static void validateAndCreateJournalDir(File dir)
+      throws IOException {
+
     if (!dir.isAbsolute()) {
     if (!dir.isAbsolute()) {
       throw new IllegalArgumentException(
       throw new IllegalArgumentException(
           "Journal dir '" + dir + "' should be an absolute path");
           "Journal dir '" + dir + "' should be an absolute path");
     }
     }
-
     DiskChecker.checkDir(dir);
     DiskChecker.checkDir(dir);
   }
   }
 
 
@@ -186,8 +211,9 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
 
 
     try {
     try {
 
 
-      validateAndCreateJournalDir(localDir);
-
+      for (File journalDir : localDir) {
+        validateAndCreateJournalDir(journalDir);
+      }
       DefaultMetricsSystem.initialize("JournalNode");
       DefaultMetricsSystem.initialize("JournalNode");
       JvmMetrics.create("JournalNode",
       JvmMetrics.create("JournalNode",
           conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
           conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
@@ -297,16 +323,33 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
    * @param jid the journal identifier
    * @param jid the journal identifier
    * @return the file, which may or may not exist yet
    * @return the file, which may or may not exist yet
    */
    */
-  private File getLogDir(String jid) {
-    String dir = conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
-        DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_DEFAULT);
+  private File getLogDir(String jid, String nameServiceId) throws IOException{
+    String dir = null;
+    if (nameServiceId != null) {
+      dir = conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY + "." +
+          nameServiceId);
+    }
+    if (dir == null) {
+      dir = conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
+          DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_DEFAULT);
+    }
+
+    File journalDir = new File(dir.trim());
+    if (!localDir.contains(journalDir)) {
+      //It is a federated setup, we need to validate journalDir
+      validateAndCreateJournalDir(journalDir);
+      localDir.add(journalDir);
+    }
+
     Preconditions.checkArgument(jid != null &&
     Preconditions.checkArgument(jid != null &&
         !jid.isEmpty(),
         !jid.isEmpty(),
         "bad journal identifier: %s", jid);
         "bad journal identifier: %s", jid);
     assert jid != null;
     assert jid != null;
-    return new File(new File(dir), jid);
+    return new File(journalDir, jid);
   }
   }
 
 
+
+
   @Override // JournalNodeMXBean
   @Override // JournalNodeMXBean
   public String getJournalsStatus() {
   public String getJournalsStatus() {
     // jid:{Formatted:True/False}
     // jid:{Formatted:True/False}
@@ -328,20 +371,22 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
     // Also note that we do not need to check localDir here since
     // Also note that we do not need to check localDir here since
     // validateAndCreateJournalDir has been called before we register the
     // validateAndCreateJournalDir has been called before we register the
     // MXBean.
     // MXBean.
-    File[] journalDirs = localDir.listFiles(new FileFilter() {
-      @Override
-      public boolean accept(File file) {
-        return file.isDirectory();
-      }
-    });
-
-    if (journalDirs != null) {
-      for (File journalDir : journalDirs) {
-        String jid = journalDir.getName();
-        if (!status.containsKey(jid)) {
-          Map<String, String> jMap = new HashMap<String, String>();
-          jMap.put("Formatted", "true");
-          status.put(jid, jMap);
+    for (File jDir : localDir) {
+      File[] journalDirs = jDir.listFiles(new FileFilter() {
+        @Override
+        public boolean accept(File file) {
+          return file.isDirectory();
+        }
+      });
+
+      if (journalDirs != null) {
+        for (File journalDir : journalDirs) {
+          String jid = journalDir.getName();
+          if (!status.containsKey(jid)) {
+            Map<String, String> jMap = new HashMap<String, String>();
+            jMap.put("Formatted", "true");
+            status.put(jid, jMap);
+          }
         }
         }
       }
       }
     }
     }
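The JournalNode changes above let the edits directory be resolved per nameservice, using "dfs.journalnode.edits.dir.<nameserviceId>" with the plain dfs.journalnode.edits.dir as the fallback. A sketch of the configuration shape that logic reads; the nameservice names and paths are examples only:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

// Resolves each nameservice's journal directory the same way the patch does:
// per-nameservice key first, global key as the fallback.
public class JournalDirConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1,ns2");
    // Fallback used when no per-nameservice key is present.
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY, "/data/jn/default");
    // Federated setup: each nameservice gets its own journal directory.
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY + ".ns1", "/data/jn/ns1");
    conf.set(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY + ".ns2", "/data/jn/ns2");

    for (String ns : conf.getTrimmedStringCollection(
        DFSConfigKeys.DFS_NAMESERVICES)) {
      String dir = conf.get(
          DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY + "." + ns,
          conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY));
      System.out.println(ns + " -> " + dir);
    }
  }
}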

+ 10 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java

@@ -17,10 +17,8 @@
  */
  */
 package org.apache.hadoop.hdfs.qjournal.server;
 package org.apache.hadoop.hdfs.qjournal.server;
 
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.URL;
-
+import com.google.common.annotations.VisibleForTesting;
+import com.google.protobuf.BlockingService;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -31,7 +29,6 @@ import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocol;
 import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocol;
 import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocolProtos.InterQJournalProtocolService;
 import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocolProtos.InterQJournalProtocolService;
-import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocolProtos.GetEditLogManifestFromJournalResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto;
@@ -52,8 +49,9 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RPC.Server;
 import org.apache.hadoop.ipc.RPC.Server;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
 
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.protobuf.BlockingService;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.net.URL;
 
 
 
 
 @InterfaceAudience.Private
 @InterfaceAudience.Private
@@ -286,14 +284,15 @@ public class JournalNodeRpcServer implements QJournalProtocol,
 
 
   @SuppressWarnings("deprecation")
   @SuppressWarnings("deprecation")
   @Override
   @Override
-  public GetEditLogManifestFromJournalResponseProto
-      getEditLogManifestFromJournal(String jid, String nameServiceId,
-                                    long sinceTxId, boolean inProgressOk)
+  public GetEditLogManifestResponseProto getEditLogManifestFromJournal(
+      String jid, String nameServiceId,
+      long sinceTxId, boolean inProgressOk)
       throws IOException {
       throws IOException {
+
     RemoteEditLogManifest manifest = jn.getOrCreateJournal(jid, nameServiceId)
     RemoteEditLogManifest manifest = jn.getOrCreateJournal(jid, nameServiceId)
         .getEditLogManifest(sinceTxId, inProgressOk);
         .getEditLogManifest(sinceTxId, inProgressOk);
 
 
-    return GetEditLogManifestFromJournalResponseProto.newBuilder()
+    return GetEditLogManifestResponseProto.newBuilder()
         .setManifest(PBHelper.convert(manifest))
         .setManifest(PBHelper.convert(manifest))
         .setHttpPort(jn.getBoundHttpAddress().getPort())
         .setHttpPort(jn.getBoundHttpAddress().getPort())
         .setFromURL(jn.getHttpServerURI())
         .setFromURL(jn.getHttpServerURI())

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocol;
-import org.apache.hadoop.hdfs.qjournal.protocol.InterQJournalProtocolProtos.GetEditLogManifestFromJournalResponseProto;
+import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto;
 import org.apache.hadoop.hdfs.qjournal.protocolPB.InterQJournalProtocolPB;
 import org.apache.hadoop.hdfs.qjournal.protocolPB.InterQJournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.Util;
@@ -245,7 +245,7 @@ public class JournalNodeSyncer {
       return;
     }
 
-    GetEditLogManifestFromJournalResponseProto editLogManifest;
+    GetEditLogManifestResponseProto editLogManifest;
     try {
       editLogManifest = jnProxy.getEditLogManifestFromJournal(jid,
           nameServiceId, 0, false);
@@ -318,8 +318,8 @@ public class JournalNodeSyncer {
   }
 
   private void getMissingLogSegments(List<RemoteEditLog> thisJournalEditLogs,
-      GetEditLogManifestFromJournalResponseProto response,
-      JournalNodeProxy remoteJNproxy) {
+                                     GetEditLogManifestResponseProto response,
+                                     JournalNodeProxy remoteJNproxy) {
 
     List<RemoteEditLog> otherJournalEditLogs = PBHelper.convert(
         response.getManifest()).getLogs();

+ 38 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java

@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs.security.token.delegation;
 import java.io.DataInput;
 import java.io.DataInput;
 import java.io.DataOutputStream;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.ArrayList;
 import java.util.Iterator;
@@ -366,34 +365,58 @@ public class DelegationTokenSecretManager
  @Override //AbstractDelegationTokenManager
  protected void logUpdateMasterKey(DelegationKey key)
      throws IOException {
-    synchronized (noInterruptsLock) {
+    try {
      // The edit logging code will fail catastrophically if it
      // is interrupted during a logSync, since the interrupt
      // closes the edit log files. Doing this inside the
-      // above lock and then checking interruption status
-      // prevents this bug.
-      if (Thread.interrupted()) {
-        throw new InterruptedIOException(
-            "Interrupted before updating master key");
+      // fsn lock will prevent being interrupted when stopping
+      // the secret manager.
+      namesystem.readLockInterruptibly();
+      try {
+        // this monitor isn't necessary if stopped while holding write lock
+        // but for safety, guard against a stop with read lock.
+        synchronized (noInterruptsLock) {
+          if (Thread.currentThread().isInterrupted()) {
+            return; // leave flag set so secret monitor exits.
+          }
+          namesystem.logUpdateMasterKey(key);
+        }
+      } finally {
+        namesystem.readUnlock();
      }
-      namesystem.logUpdateMasterKey(key);
+    } catch (InterruptedException ie) {
+      // AbstractDelegationTokenManager may crash if an exception is thrown.
+      // The interrupt flag will be detected when it attempts to sleep.
+      Thread.currentThread().interrupt();
    }
  }

  @Override //AbstractDelegationTokenManager
  protected void logExpireToken(final DelegationTokenIdentifier dtId)
      throws IOException {
-    synchronized (noInterruptsLock) {
+    try {
      // The edit logging code will fail catastrophically if it
      // is interrupted during a logSync, since the interrupt
      // closes the edit log files. Doing this inside the
-      // above lock and then checking interruption status
-      // prevents this bug.
-      if (Thread.interrupted()) {
-        throw new InterruptedIOException(
-            "Interrupted before expiring delegation token");
+      // fsn lock will prevent being interrupted when stopping
+      // the secret manager.
+      namesystem.readLockInterruptibly();
+      try {
+        // this monitor isn't necessary if stopped while holding write lock
+        // but for safety, guard against a stop with read lock.
+        synchronized (noInterruptsLock) {
+          if (Thread.currentThread().isInterrupted()) {
+            return; // leave flag set so secret monitor exits.
+          }
+          namesystem.logExpireDelegationToken(dtId);
+        }
+      } finally {
+        namesystem.readUnlock();
      }
-      namesystem.logExpireDelegationToken(dtId);
+    } catch (InterruptedException ie) {
+      // AbstractDelegationTokenManager may crash if an exception is thrown.
+      // The interrupt flag will be detected when it attempts to sleep.
+      Thread.currentThread().interrupt();
    }
  }
 
 

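The hunk above takes the namesystem read lock interruptibly, re-checks the interrupt flag under the no-interrupts monitor before touching the edit log, and restores the flag instead of throwing when interrupted. A minimal, self-contained Java sketch of that pattern follows; ReentrantReadWriteLock stands in for the FSNamesystem lock and a Runnable stands in for the edit-log call, so the names are illustrative rather than the real HDFS API.

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class InterruptSafeEditLogSketch {
  private final ReentrantReadWriteLock fsnLock = new ReentrantReadWriteLock();
  private final Object noInterruptsLock = new Object();

  void logSafely(Runnable editLogOp) {
    try {
      // Per the diff comments, the stop path holds the write lock, so taking
      // the read lock here defers a stop (and its interrupt) until we finish.
      fsnLock.readLock().lockInterruptibly();
      try {
        synchronized (noInterruptsLock) {
          if (Thread.currentThread().isInterrupted()) {
            return; // leave the flag set so the caller can exit.
          }
          editLogOp.run();
        }
      } finally {
        fsnLock.readLock().unlock();
      }
    } catch (InterruptedException ie) {
      // Do not propagate; restore the flag and let the caller notice it.
      Thread.currentThread().interrupt();
    }
  }

  public static void main(String[] args) {
    new InterruptSafeEditLogSketch()
        .logSafely(() -> System.out.println("logUpdateMasterKey(...)"));
  }
}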
+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java

@@ -33,6 +33,7 @@ abstract class BlockReconstructionWork {
  private final BlockInfo block;

  private final String srcPath;
+  private final long blockSize;
  private final byte storagePolicyID;

  /**
@@ -59,6 +60,7 @@ abstract class BlockReconstructionWork {
      int priority) {
    this.block = block;
    this.srcPath = bc.getName();
+    this.blockSize = block.getNumBytes();
    this.storagePolicyID = bc.getStoragePolicyID();
    this.srcNodes = srcNodes;
    this.containingNodes = containingNodes;
@@ -100,6 +102,10 @@ abstract class BlockReconstructionWork {
    return srcPath;
  }

+  public long getBlockSize() {
+    return blockSize;
+  }
+
  public byte getStoragePolicyID() {
    return storagePolicyID;
  }

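The new blockSize field above caches block.getNumBytes() at construction time, and the ErasureCodingWork and ReplicationWork hunks below switch their chooseTarget calls to this getBlockSize() accessor instead of re-reading getBlock().getNumBytes(). A tiny illustrative sketch of that capture-once shape (the class and constructor here are hypothetical stand-ins, not the real BlockReconstructionWork API):

final class ReconstructionWorkSketch {
  private final String srcPath;
  private final long blockSize; // captured once, read later by placement code

  ReconstructionWorkSketch(String srcPath, long numBytes) {
    this.srcPath = srcPath;
    this.blockSize = numBytes;
  }

  String getSrcPath() {
    return srcPath;
  }

  long getBlockSize() {
    return blockSize;
  }
}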
+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java

@@ -59,8 +59,7 @@ class ErasureCodingWork extends BlockReconstructionWork {
    // TODO: new placement policy for EC considering multiple writers
    DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
        getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
-        getLiveReplicaStorages(), false, excludedNodes,
-        getBlock().getNumBytes(),
+        getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
        storagePolicySuite.getPolicy(getStoragePolicyID()), null);
    setTargets(chosenTargets);
  }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/LowRedundancyBlocks.java

@@ -365,7 +365,7 @@ class LowRedundancyBlocks implements Iterable<BlockInfo> {
          NameNode.blockStateChangeLog.debug(
              "BLOCK* NameSystem.LowRedundancyBlock.remove: Removing block" +
                  " {} from priority queue {}", block, i);
-          decrementBlockStat(block, priLevel, oldExpectedReplicas);
+          decrementBlockStat(block, i, oldExpectedReplicas);
          return true;
        }
      }

+ 2 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java

@@ -45,10 +45,8 @@ class ReplicationWork extends BlockReconstructionWork {
    try {
      DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
          getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
-          getLiveReplicaStorages(), false, excludedNodes,
-          getBlock().getNumBytes(),
-          storagePolicySuite.getPolicy(getStoragePolicyID()),
-          null);
+          getLiveReplicaStorages(), false, excludedNodes, getBlockSize(),
+          storagePolicySuite.getPolicy(getStoragePolicyID()), null);
      setTargets(chosenTargets);
    } finally {
      getSrcNodes()[0].decrementPendingReplicationWithoutTargets();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowDiskTracker.java

@@ -53,7 +53,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 @InterfaceStability.Unstable
 public class SlowDiskTracker {
   public static final Logger LOG =
-      LoggerFactory.getLogger(SlowPeerTracker.class);
+      LoggerFactory.getLogger(SlowDiskTracker.class);

   /**
    * Time duration after which a report is considered stale. This is

+ 37 - 38
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -35,8 +35,6 @@ import java.util.Properties;
 import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
@@ -53,7 +51,8 @@ import org.apache.hadoop.util.VersionInfo;
 
 import com.google.common.base.Charsets;
 import com.google.common.base.Preconditions;
-
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;


 /**
@@ -76,7 +75,9 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public abstract class Storage extends StorageInfo {
-  public static final Log LOG = LogFactory.getLog(Storage.class.getName());
+
+  public static final Logger LOG = LoggerFactory
+      .getLogger(Storage.class.getName());

  // last layout version that did not support upgrades
  public static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3;
@@ -396,7 +397,7 @@ public abstract class Storage extends StorageInfo {
          return FileUtils.sizeOfDirectory(root);
        }
      } catch (Exception e) {
-        LOG.warn("Failed to get directory size :" + root, e);
+        LOG.warn("Failed to get directory size : {}", root, e);
      }
      return 0;
    }
@@ -427,7 +428,7 @@ public abstract class Storage extends StorageInfo {
      }
      if (curDir.exists()) {
        File[] files = FileUtil.listFiles(curDir);
-        LOG.info("Will remove files: " + Arrays.toString(files));
+        LOG.info("Will remove files: {}", Arrays.toString(files));
        if (!(FileUtil.fullyDelete(curDir)))
          throw new IOException("Cannot remove current directory: " + curDir);
      }
@@ -650,25 +651,25 @@ public abstract class Storage extends StorageInfo {
          // storage directory does not exist
          if (startOpt != StartupOption.FORMAT &&
              startOpt != StartupOption.HOTSWAP) {
-            LOG.warn("Storage directory " + rootPath + " does not exist");
+            LOG.warn("Storage directory {} does not exist", rootPath);
            return StorageState.NON_EXISTENT;
          }
-          LOG.info(rootPath + " does not exist. Creating ...");
+          LOG.info("{} does not exist. Creating ...", rootPath);
          if (!root.mkdirs())
            throw new IOException("Cannot create directory " + rootPath);
          hadMkdirs = true;
        }
        // or is inaccessible
        if (!root.isDirectory()) {
-          LOG.warn(rootPath + "is not a directory");
+          LOG.warn("{} is not a directory", rootPath);
          return StorageState.NON_EXISTENT;
        }
        if (!FileUtil.canWrite(root)) {
-          LOG.warn("Cannot access storage directory " + rootPath);
+          LOG.warn("Cannot access storage directory {}", rootPath);
          return StorageState.NON_EXISTENT;
        }
      } catch(SecurityException ex) {
-        LOG.warn("Cannot access storage directory " + rootPath, ex);
+        LOG.warn("Cannot access storage directory {}", rootPath, ex);
        return StorageState.NON_EXISTENT;
      }

@@ -770,43 +771,43 @@ public abstract class Storage extends StorageInfo {
      String rootPath = root.getCanonicalPath();
      switch(curState) {
      case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
-        LOG.info("Completing previous upgrade for storage directory " 
-                 + rootPath);
+        LOG.info("Completing previous upgrade for storage directory {}",
+            rootPath);
        rename(getPreviousTmp(), getPreviousDir());
        return;
      case RECOVER_UPGRADE:   // mv previous.tmp -> current
-        LOG.info("Recovering storage directory " + rootPath
-                 + " from previous upgrade");
+        LOG.info("Recovering storage directory {} from previous upgrade",
+            rootPath);
        if (curDir.exists())
          deleteDir(curDir);
        rename(getPreviousTmp(), curDir);
        return;
      case COMPLETE_ROLLBACK: // rm removed.tmp
-        LOG.info("Completing previous rollback for storage directory "
-                 + rootPath);
+        LOG.info("Completing previous rollback for storage directory {}",
+            rootPath);
        deleteDir(getRemovedTmp());
        return;
      case RECOVER_ROLLBACK:  // mv removed.tmp -> current
-        LOG.info("Recovering storage directory " + rootPath
-                 + " from previous rollback");
+        LOG.info("Recovering storage directory {} from previous rollback",
+            rootPath);
        rename(getRemovedTmp(), curDir);
        return;
      case COMPLETE_FINALIZE: // rm finalized.tmp
-        LOG.info("Completing previous finalize for storage directory "
-                 + rootPath);
+        LOG.info("Completing previous finalize for storage directory {}",
+            rootPath);
        deleteDir(getFinalizedTmp());
        return;
      case COMPLETE_CHECKPOINT: // mv lastcheckpoint.tmp -> previous.checkpoint
-        LOG.info("Completing previous checkpoint for storage directory " 
-                 + rootPath);
+        LOG.info("Completing previous checkpoint for storage directory {}",
+            rootPath);
        File prevCkptDir = getPreviousCheckpoint();
        if (prevCkptDir.exists())
          deleteDir(prevCkptDir);
        rename(getLastCheckpointTmp(), prevCkptDir);
        return;
      case RECOVER_CHECKPOINT:  // mv lastcheckpoint.tmp -> current
-        LOG.info("Recovering storage directory " + rootPath
-                 + " from failed checkpoint");
+        LOG.info("Recovering storage directory {} from failed checkpoint",
+            rootPath);
        if (curDir.exists())
          deleteDir(curDir);
        rename(getLastCheckpointTmp(), curDir);
@@ -860,12 +861,12 @@ public abstract class Storage extends StorageInfo {
     */
    public void lock() throws IOException {
      if (isShared()) {
-        LOG.info("Locking is disabled for " + this.root);
+        LOG.info("Locking is disabled for {}", this.root);
        return;
      }
      FileLock newLock = tryLock();
      if (newLock == null) {
-        String msg = "Cannot lock storage " + this.root 
+        String msg = "Cannot lock storage " + this.root
          + ". The directory is already locked";
        LOG.info(msg);
        throw new IOException(msg);
@@ -897,22 +898,22 @@ public abstract class Storage extends StorageInfo {
      try {
        res = file.getChannel().tryLock();
        if (null == res) {
-          LOG.error("Unable to acquire file lock on path " + lockF.toString());
+          LOG.error("Unable to acquire file lock on path {}", lockF);
          throw new OverlappingFileLockException();
        }
        file.write(jvmName.getBytes(Charsets.UTF_8));
-        LOG.info("Lock on " + lockF + " acquired by nodename " + jvmName);
+        LOG.info("Lock on {} acquired by nodename {}", lockF, jvmName);
      } catch(OverlappingFileLockException oe) {
        // Cannot read from the locked file on Windows.
        String lockingJvmName = Path.WINDOWS ? "" : (" " + file.readLine());
-        LOG.error("It appears that another node " + lockingJvmName
-            + " has already locked the storage directory: " + root, oe);
+        LOG.error("It appears that another node {} has already locked the "
+            + "storage directory: {}", lockingJvmName, root, oe);
        file.close();
        return null;
      } catch(IOException e) {
-        LOG.error("Failed to acquire lock on " + lockF
-            + ". If this storage directory is mounted via NFS, " 
-            + "ensure that the appropriate nfs lock services are running.", e);
+        LOG.error("Failed to acquire lock on {}. If this storage directory is"
+            + " mounted via NFS, ensure that the appropriate nfs lock services"
+            + " are running.", lockF, e);
        file.close();
        throw e;
      }
@@ -1331,10 +1332,8 @@ public abstract class Storage extends StorageInfo {
    }
    if (preserveFileDate) {
      if (destFile.setLastModified(srcFile.lastModified()) == false) {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Failed to preserve last modified date from'" + srcFile
-            + "' to '" + destFile + "'");
-        }
+        LOG.debug("Failed to preserve last modified date from'{}' to '{}'",
+            srcFile, destFile);
      }
    }
  }

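The Storage.java hunk above is a mechanical move from commons-logging to slf4j: string concatenation becomes '{}' placeholders, explicit isDebugEnabled() guards disappear, and a Throwable passed as the final argument is still printed as a stack trace even though it has no placeholder. A small stand-alone illustration of that style (the class name is invented; the logging calls are ordinary slf4j API):

import java.io.IOException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jStyleSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jStyleSketch.class);

  public static void main(String[] args) {
    String rootPath = "/data/dfs/name";
    // Placeholder arguments are only formatted when the level is enabled,
    // which is why the old isDebugEnabled() guard is no longer needed.
    LOG.info("Recovering storage directory {} from previous upgrade", rootPath);
    try {
      throw new IOException("simulated failure");
    } catch (IOException e) {
      // A trailing Throwable is not matched to a placeholder; slf4j prints
      // its stack trace after the formatted message.
      LOG.warn("Cannot access storage directory {}", rootPath, e);
    }
  }
}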
+ 14 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java

@@ -268,7 +268,20 @@ class BlockPoolManager {
          lifelineAddrs.add(nnIdToLifelineAddr != null ?
              nnIdToLifelineAddr.get(nnId) : null);
        }
-        bpos.refreshNNList(addrs, lifelineAddrs);
+        try {
+          UserGroupInformation.getLoginUser()
+              .doAs(new PrivilegedExceptionAction<Object>() {
+                @Override
+                public Object run() throws Exception {
+                  bpos.refreshNNList(addrs, lifelineAddrs);
+                  return null;
+                }
+              });
+        } catch (InterruptedException ex) {
+          IOException ioe = new IOException();
+          ioe.initCause(ex.getCause());
+          throw ioe;
+        }
      }
    }
  }

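The BlockPoolManager hunk above wraps the namenode-list refresh in a doAs as the login user, presumably so that any connections created during the refresh carry the daemon's own credentials, and it converts an InterruptedException into an IOException. A sketch of that wrapping under those assumptions; refreshAsLoginUser and doRefresh are illustrative names, while UserGroupInformation.getLoginUser() and doAs(...) are the same Hadoop calls used in the hunk.

import java.io.IOException;
import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class LoginUserRefreshSketch {

  static void refreshAsLoginUser(final Runnable doRefresh) throws IOException {
    try {
      // Run the refresh with the process's login credentials rather than
      // whatever UGI happens to be attached to the calling thread.
      UserGroupInformation.getLoginUser()
          .doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() {
              doRefresh.run();
              return null;
            }
          });
    } catch (InterruptedException ex) {
      // Mirror the hunk above: surface the underlying cause as an IOException.
      IOException ioe = new IOException();
      ioe.initCause(ex.getCause());
      throw ioe;
    }
  }
}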
+ 43 - 45
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -159,17 +159,15 @@ public class BlockPoolSliceStorage extends Storage {
      case NORMAL:
        break;
      case NON_EXISTENT:
-        LOG.info("Block pool storage directory for location " + location +
-            " and block pool id " + nsInfo.getBlockPoolID() +
-            " does not exist");
+        LOG.info("Block pool storage directory for location {} and block pool"
+            + " id {} does not exist", location, nsInfo.getBlockPoolID());
        throw new IOException("Storage directory for location " + location +
            " and block pool id " + nsInfo.getBlockPoolID() +
            " does not exist");
      case NOT_FORMATTED: // format
-        LOG.info("Block pool storage directory for location " + location +
-            " and block pool id " + nsInfo.getBlockPoolID()
-            + " is not formatted for " + nsInfo.getBlockPoolID()
-            + ". Formatting ...");
+        LOG.info("Block pool storage directory for location {} and block pool"
+                + " id {} is not formatted. Formatting ...", location,
+            nsInfo.getBlockPoolID());
        format(sd, nsInfo);
        break;
      default:  // recovery part is common
@@ -227,8 +225,8 @@ public class BlockPoolSliceStorage extends Storage {
          nsInfo, location, startOpt, callables, conf);
      succeedDirs.add(sd);
    } catch (IOException e) {
-      LOG.warn("Failed to analyze storage directories for block pool "
-          + nsInfo.getBlockPoolID(), e);
+      LOG.warn("Failed to analyze storage directories for block pool {}",
+          nsInfo.getBlockPoolID(), e);
      throw e;
    }
    return succeedDirs;
@@ -250,7 +248,8 @@ public class BlockPoolSliceStorage extends Storage {
      StorageLocation location, StartupOption startOpt,
      List<Callable<StorageDirectory>> callables, Configuration conf)
          throws IOException {
-    LOG.info("Analyzing storage directories for bpid " + nsInfo.getBlockPoolID());
+    LOG.info("Analyzing storage directories for bpid {}", nsInfo
+        .getBlockPoolID());
    final List<StorageDirectory> loaded = loadBpStorageDirectories(
        nsInfo, location, startOpt, callables, conf);
    for (StorageDirectory sd : loaded) {
@@ -278,8 +277,8 @@ public class BlockPoolSliceStorage extends Storage {
   * @throws IOException Signals that an I/O exception has occurred.
   */
  private void format(StorageDirectory bpSdir, NamespaceInfo nsInfo) throws IOException {
-    LOG.info("Formatting block pool " + blockpoolID + " directory "
-        + bpSdir.getCurrentDir());
+    LOG.info("Formatting block pool {} directory {}", blockpoolID, bpSdir
+        .getCurrentDir());
    bpSdir.clearDirectory(); // create directory
    this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
    this.cTime = nsInfo.getCTime();
@@ -295,7 +294,7 @@ public class BlockPoolSliceStorage extends Storage {
   */
  void remove(File absPathToRemove) {
    Preconditions.checkArgument(absPathToRemove.isAbsolute());
-    LOG.info("Removing block level storage: " + absPathToRemove);
+    LOG.info("Removing block level storage: {}", absPathToRemove);
    for (Iterator<StorageDirectory> it = getStorageDirs().iterator();
         it.hasNext(); ) {
      StorageDirectory sd = it.next();
@@ -375,7 +374,7 @@ public class BlockPoolSliceStorage extends Storage {
      // during rolling upgrade rollback. They are deleted during rolling
      // upgrade downgrade.
      int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
-      LOG.info("Restored " + restored + " block files from trash.");
+      LOG.info("Restored {} block files from trash.", restored);
    }
    readProperties(sd);
    checkVersionUpgradable(this.layoutVersion);
@@ -399,9 +398,9 @@ public class BlockPoolSliceStorage extends Storage {
    }
    if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION) {
      int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
-      LOG.info("Restored " + restored + " block files from trash " +
-        "before the layout upgrade. These blocks will be moved to " +
-        "the previous directory during the upgrade");
+      LOG.info("Restored {} block files from trash " +
+          "before the layout upgrade. These blocks will be moved to " +
+          "the previous directory during the upgrade", restored);
    }
    if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION
        || this.cTime < nsInfo.getCTime()) {
@@ -448,11 +447,10 @@ public class BlockPoolSliceStorage extends Storage {
      return;
    }
    final int oldLV = getLayoutVersion();
-    LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
-        + ".\n   old LV = " + oldLV
-        + "; old CTime = " + this.getCTime()
-        + ".\n   new LV = " + HdfsServerConstants.DATANODE_LAYOUT_VERSION
-        + "; new CTime = " + nsInfo.getCTime());
+    LOG.info("Upgrading block pool storage directory {}.\n   old LV = {}; old"
+        + " CTime = {}.\n   new LV = {}; new CTime = {}",
+        bpSd.getRoot(), oldLV, this.getCTime(), HdfsServerConstants
+            .DATANODE_LAYOUT_VERSION, nsInfo.getCTime());
    // get <SD>/previous directory
    String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath());
    StorageDirectory dnSdStorage = new StorageDirectory(new File(dnRoot));
@@ -508,7 +506,7 @@ public class BlockPoolSliceStorage extends Storage {
    // 4.rename <SD>/current/<bpid>/previous.tmp to
    // <SD>/current/<bpid>/previous
    rename(bpTmpDir, bpPrevDir);
-    LOG.info("Upgrade of " + name + " is complete");
+    LOG.info("Upgrade of {} is complete", name);
  }

  /**
@@ -569,8 +567,8 @@ public class BlockPoolSliceStorage extends Storage {
        // Failsafe - we should not hit this case but let's make sure
        // we never overwrite a newer version of a block file with an
        // older version.
-        LOG.info("Not overwriting " + newChild + " with smaller file from " +
-                     "trash directory. This message can be safely ignored.");
+        LOG.info("Not overwriting {} with smaller file from " +
+            "trash directory. This message can be safely ignored.", newChild);
      } else if (!child.renameTo(newChild)) {
        throw new IOException("Failed to rename " + child + " to " + newChild);
      } else {
@@ -616,10 +614,10 @@ public class BlockPoolSliceStorage extends Storage {
              + " is newer than the namespace state: LV = "
              + HdfsServerConstants.DATANODE_LAYOUT_VERSION + " CTime = " + nsInfo.getCTime());
    }
-    
-    LOG.info("Rolling back storage directory " + bpSd.getRoot()
-        + ".\n   target LV = " + nsInfo.getLayoutVersion()
-        + "; target CTime = " + nsInfo.getCTime());
+
+    LOG.info("Rolling back storage directory {}.\n   target LV = {}; target "
+            + "CTime = {}", bpSd.getRoot(), nsInfo.getLayoutVersion(),
+        nsInfo.getCTime());
    File tmpDir = bpSd.getRemovedTmp();
    assert !tmpDir.exists() : "removed.tmp directory must not exist.";
    // 1. rename current to tmp
@@ -632,7 +630,7 @@ public class BlockPoolSliceStorage extends Storage {
    
    // 3. delete removed.tmp dir
    deleteDir(tmpDir);
-    LOG.info("Rollback of " + bpSd.getRoot() + " is complete");
+    LOG.info("Rollback of {} is complete", bpSd.getRoot());
  }

  /*
@@ -651,9 +649,9 @@ public class BlockPoolSliceStorage extends Storage {
      return; // already finalized
    }
    final String dataDirPath = bpSd.getRoot().getCanonicalPath();
-    LOG.info("Finalizing upgrade for storage directory " + dataDirPath
-        + ".\n   cur LV = " + this.getLayoutVersion() + "; cur CTime = "
-        + this.getCTime());
+    LOG.info("Finalizing upgrade for storage directory {}.\n   cur LV = {}; "
+            + "cur CTime = {}", dataDirPath, this.getLayoutVersion(),
+        this.getCTime());
    assert bpSd.getCurrentDir().exists() : "Current directory must exist.";
    
    // rename previous to finalized.tmp
@@ -667,9 +665,9 @@ public class BlockPoolSliceStorage extends Storage {
        try {
          deleteDir(tmpDir);
        } catch (IOException ex) {
-          LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
+          LOG.error("Finalize upgrade for {} failed.", dataDirPath, ex);
        }
-        LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
+        LOG.info("Finalize upgrade for {} is complete.", dataDirPath);
      }

      @Override
@@ -695,8 +693,8 @@ public class BlockPoolSliceStorage extends Storage {
        diskLayoutVersion, hardLink, conf);
    DataStorage.linkBlocks(fromDir, toDir, DataStorage.STORAGE_DIR_RBW,
        diskLayoutVersion, hardLink, conf);
-    LOG.info("Linked blocks from " + fromDir + " to " + toDir + ". "
-        + hardLink.linkStats.report());
+    LOG.info("Linked blocks from {} to {}. {}", fromDir, toDir,
+        hardLink.linkStats.report());
  }

  /**
@@ -763,7 +761,7 @@ public class BlockPoolSliceStorage extends Storage {
      File blockFile = new File(blockURI);
      return getTrashDirectory(blockFile);
    } catch (IllegalArgumentException e) {
-      LOG.warn("Failed to get block file for replica " + info, e);
+      LOG.warn("Failed to get block file for replica {}", info, e);
    }

    return null;
@@ -791,7 +789,7 @@ public class BlockPoolSliceStorage extends Storage {
  String getRestoreDirectory(File blockFile) {
    Matcher matcher = BLOCK_POOL_TRASH_PATH_PATTERN.matcher(blockFile.getParent());
    String restoreDirectory = matcher.replaceFirst("$1$2" + STORAGE_DIR_CURRENT + "$4");
-    LOG.info("Restoring " + blockFile + " to " + restoreDirectory);
+    LOG.info("Restoring {} to {}", blockFile, restoreDirectory);
    return restoreDirectory;
  }

@@ -804,7 +802,7 @@ public class BlockPoolSliceStorage extends Storage {
      File trashRoot = getTrashRootDir(sd);
      if (trashRoot.exists() && sd.getPreviousDir().exists()) {
        LOG.error("Trash and PreviousDir shouldn't both exist for storage "
-            + "directory " + sd);
+            + "directory {}", sd);
        assert false;
      } else {
        trashRoots.add(trashRoot);
@@ -817,7 +815,7 @@ public class BlockPoolSliceStorage extends Storage {
      public void run() {
        for(File trashRoot : trashRoots){
          FileUtil.fullyDelete(trashRoot);
-          LOG.info("Cleared trash for storage directory " + trashRoot);
+          LOG.info("Cleared trash for storage directory {}", trashRoot);
        }
      }

@@ -860,9 +858,9 @@ public class BlockPoolSliceStorage extends Storage {
      File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
      if (!storagesWithRollingUpgradeMarker.contains(bpRoot.toString())) {
        if (!markerFile.exists() && markerFile.createNewFile()) {
-          LOG.info("Created " + markerFile);
+          LOG.info("Created {}", markerFile);
        } else {
-          LOG.info(markerFile + " already exists.");
+          LOG.info("{} already exists.", markerFile);
        }
        storagesWithRollingUpgradeMarker.add(bpRoot.toString());
        storagesWithoutRollingUpgradeMarker.remove(bpRoot.toString());
@@ -885,10 +883,10 @@ public class BlockPoolSliceStorage extends Storage {
      File markerFile = new File(bpRoot, ROLLING_UPGRADE_MARKER_FILE);
      if (!storagesWithoutRollingUpgradeMarker.contains(bpRoot.toString())) {
        if (markerFile.exists()) {
-          LOG.info("Deleting " + markerFile);
+          LOG.info("Deleting {}", markerFile);
          doFinalize(sd.getCurrentDir());
          if (!markerFile.delete()) {
-            LOG.warn("Failed to delete " + markerFile);
+            LOG.warn("Failed to delete {}", markerFile);
          }
        }
        storagesWithoutRollingUpgradeMarker.add(bpRoot.toString());

+ 43 - 13
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -175,8 +175,13 @@ class BlockSender implements java.io.Closeable {
   * See {{@link BlockSender#isLongRead()}
   */
  private static final long LONG_READ_THRESHOLD_BYTES = 256 * 1024;
-  

+  // The number of bytes per checksum here determines the alignment
+  // of reads: we always start reading at a checksum chunk boundary,
+  // even if the checksum type is NULL. So, choosing too big of a value
+  // would risk sending too much unnecessary data. 512 (1 disk sector)
+  // is likely to result in minimal extra IO.
+  private static final long CHUNK_SIZE = 512;
  /**
   * Constructor
   * 
@@ -250,18 +255,16 @@ class BlockSender implements java.io.Closeable {
      try(AutoCloseableLock lock = datanode.data.acquireDatasetLock()) {
        replica = getReplica(block, datanode);
        replicaVisibleLength = replica.getVisibleLength();
-        if (replica instanceof FinalizedReplica) {
-          // Load last checksum in case the replica is being written
-          // concurrently
-          final FinalizedReplica frep = (FinalizedReplica) replica;
-          chunkChecksum = frep.getLastChecksumAndDataLen();
-        }
      }
      if (replica.getState() == ReplicaState.RBW) {
        final ReplicaInPipeline rbw = (ReplicaInPipeline) replica;
        waitForMinLength(rbw, startOffset + length);
        chunkChecksum = rbw.getLastChecksumAndDataLen();
      }
+      if (replica instanceof FinalizedReplica) {
+        chunkChecksum = getPartialChunkChecksumForFinalized(
+            (FinalizedReplica)replica);
+      }

      if (replica.getGenerationStamp() < block.getGenerationStamp()) {
        throw new IOException("Replica gen stamp < block genstamp, block="
@@ -348,12 +351,8 @@ class BlockSender implements java.io.Closeable {
        }
      }
      if (csum == null) {
-        // The number of bytes per checksum here determines the alignment
-        // of reads: we always start reading at a checksum chunk boundary,
-        // even if the checksum type is NULL. So, choosing too big of a value
-        // would risk sending too much unnecessary data. 512 (1 disk sector)
-        // is likely to result in minimal extra IO.
-        csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512);
+        csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL,
+            (int)CHUNK_SIZE);
      }

      /*
@@ -427,6 +426,37 @@ class BlockSender implements java.io.Closeable {
    }
  }

+  private ChunkChecksum getPartialChunkChecksumForFinalized(
+      FinalizedReplica finalized) throws IOException {
+    // There are a number of places in the code base where a finalized replica
+    // object is created. If last partial checksum is loaded whenever a
+    // finalized replica is created, it would increase latency in DataNode
+    // initialization. Therefore, the last partial chunk checksum is loaded
+    // lazily.
+
+    // Load last checksum in case the replica is being written concurrently
+    final long replicaVisibleLength = replica.getVisibleLength();
+    if (replicaVisibleLength % CHUNK_SIZE != 0 &&
+        finalized.getLastPartialChunkChecksum() == null) {
+      // the finalized replica does not have precomputed last partial
+      // chunk checksum. Recompute now.
+      try {
+        finalized.loadLastPartialChunkChecksum();
+        return new ChunkChecksum(finalized.getVisibleLength(),
+            finalized.getLastPartialChunkChecksum());
+      } catch (FileNotFoundException e) {
+        // meta file is lost. Continue anyway to preserve existing behavior.
+        DataNode.LOG.warn(
+            "meta file " + finalized.getMetaFile() + " is missing!");
+        return null;
+      }
+    } else {
+      // If the checksum is null, BlockSender will use on-disk checksum.
+      return new ChunkChecksum(finalized.getVisibleLength(),
+          finalized.getLastPartialChunkChecksum());
+    }
+  }
+
  /**
   * close opened files.
   */

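The new getPartialChunkChecksumForFinalized method above loads a finalized replica's last partial chunk checksum lazily: the meta file is only read when the replica's visible length is not a multiple of the 512-byte chunk and nothing has been cached yet, so DataNode startup does not pay that cost for every finalized replica. A self-contained sketch of that lazy-load shape (every type and method name here is a hypothetical stand-in, not the real FinalizedReplica API):

public class LazyPartialChecksumSketch {
  private static final long CHUNK_SIZE = 512;

  private final long visibleLength;
  private byte[] lastPartialChunkChecksum; // null until first needed

  LazyPartialChecksumSketch(long visibleLength) {
    this.visibleLength = visibleLength;
  }

  byte[] getOrLoadLastPartialChunkChecksum() {
    // Only a replica that ends mid-chunk needs the partial checksum, and it
    // is read on first use instead of when the replica object is created.
    if (visibleLength % CHUNK_SIZE != 0 && lastPartialChunkChecksum == null) {
      lastPartialChunkChecksum = readLastCrcFromMetaFile();
    }
    return lastPartialChunkChecksum;
  }

  private byte[] readLastCrcFromMetaFile() {
    // Stand-in for reading the last CRC of the partial chunk from the
    // replica's meta file on disk.
    return new byte[] {0, 0, 0, 0};
  }

  public static void main(String[] args) {
    LazyPartialChecksumSketch replica = new LazyPartialChecksumSketch(1000);
    System.out.println(replica.getOrLoadLastPartialChunkChecksum() != null);
  }
}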
Some files were not shown because too many files changed in this diff