
Merge branch 'trunk' into HADOOP-13345

Steve Loughran · 8 years ago · commit cf36cbd356

100 changed files with 1237 additions and 225 deletions
  1. 31 5
      dev-support/bin/create-release
  2. 2 2
      hadoop-assemblies/pom.xml
  3. 1 1
      hadoop-build-tools/pom.xml
  4. 2 2
      hadoop-client-modules/hadoop-client-api/pom.xml
  5. 2 2
      hadoop-client-modules/hadoop-client-check-invariants/pom.xml
  6. 2 2
      hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml
  7. 2 2
      hadoop-client-modules/hadoop-client-integration-tests/pom.xml
  8. 2 2
      hadoop-client-modules/hadoop-client-minicluster/pom.xml
  9. 2 2
      hadoop-client-modules/hadoop-client-runtime/pom.xml
  10. 2 2
      hadoop-client-modules/hadoop-client/pom.xml
  11. 1 1
      hadoop-client-modules/pom.xml
  12. 2 2
      hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml
  13. 2 2
      hadoop-cloud-storage-project/pom.xml
  14. 2 2
      hadoop-common-project/hadoop-annotations/pom.xml
  15. 2 2
      hadoop-common-project/hadoop-auth-examples/pom.xml
  16. 2 2
      hadoop-common-project/hadoop-auth/pom.xml
  17. 2 2
      hadoop-common-project/hadoop-common/pom.xml
  18. 47 16
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
  19. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureEncodingStep.java
  20. 0 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
  21. 2 3
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_decoder.c
  22. 1 1
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_encoder.c
  23. 3 1
      hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
  24. 11 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java
  25. 5 1
      hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
  26. 2 2
      hadoop-common-project/hadoop-kms/pom.xml
  27. 4 1
      hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties
  28. 2 2
      hadoop-common-project/hadoop-minikdc/pom.xml
  29. 2 2
      hadoop-common-project/hadoop-nfs/pom.xml
  30. 2 2
      hadoop-common-project/pom.xml
  31. 2 2
      hadoop-dist/pom.xml
  32. 2 2
      hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
  33. 14 5
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  34. 3 2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
  35. 22 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  36. 22 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
  37. 17 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  38. 30 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  39. 3 2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
  40. 2 3
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java
  41. 4 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
  42. 14 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto
  43. 2 2
      hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
  44. 2 2
      hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
  45. 2 2
      hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
  46. 0 5
      hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml
  47. 2 2
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  48. 28 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  49. 6 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
  50. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java
  51. 9 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  52. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
  53. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
  54. 7 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java
  55. 1 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
  56. 4 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
  57. 12 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  58. 8 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
  59. 56 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java
  60. 12 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
  61. 55 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  62. 12 15
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java
  63. 16 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  64. 18 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java
  65. 3 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java
  66. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
  67. 98 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java
  68. 3 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java
  69. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
  70. 14 6
      hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md
  71. 48 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
  72. 50 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java
  73. 35 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
  74. 51 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
  75. 84 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
  76. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
  77. 196 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml
  78. 2 2
      hadoop-hdfs-project/pom.xml
  79. 1 1
      hadoop-mapreduce-project/bin/mapred-config.sh
  80. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml
  81. 4 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
  82. 69 4
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
  83. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
  84. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
  85. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml
  86. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml
  87. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml
  88. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml
  89. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml
  90. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
  91. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml
  92. 2 2
      hadoop-mapreduce-project/pom.xml
  93. 1 1
      hadoop-maven-plugins/pom.xml
  94. 2 2
      hadoop-minicluster/pom.xml
  95. 2 2
      hadoop-project-dist/pom.xml
  96. 3 3
      hadoop-project/pom.xml
  97. 1 1
      hadoop-tools/hadoop-aliyun/pom.xml
  98. 2 2
      hadoop-tools/hadoop-archive-logs/pom.xml
  99. 2 2
      hadoop-tools/hadoop-archives/pom.xml
  100. 2 2
      hadoop-tools/hadoop-aws/pom.xml

+ 31 - 5
dev-support/bin/create-release

@@ -50,6 +50,7 @@ function hadoop_abs
   declare obj=$1
   declare dir
   declare fn
+  declare ret
 
   if [[ ! -e ${obj} ]]; then
     return 1
@@ -62,7 +63,8 @@ function hadoop_abs
   fi
 
   dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
-  if [[ $? = 0 ]]; then
+  ret=$?
+  if [[ ${ret} = 0 ]]; then
     echo "${dir}${fn}"
     return 0
   fi
@@ -287,6 +289,7 @@ function usage
   echo "--mvncache=[path]       Path to the maven cache to use"
   echo "--native                Also build the native components"
   echo "--rc-label=[label]      Add this label to the builds"
+  echo "--security              Emergency security release"
   echo "--sign                  Use .gnupg dir to sign the artifacts and jars"
   echo "--version=[version]     Use an alternative version string"
 }
@@ -330,6 +333,9 @@ function option_parse
       --rc-label=*)
         RC_LABEL=${i#*=}
       ;;
+      --security)
+        SECURITYRELEASE=true
+      ;;
       --sign)
         SIGN=true
       ;;
@@ -397,6 +403,14 @@ function option_parse
       MVN_ARGS=("-Dmaven.repo.local=${MVNCACHE}")
     fi
   fi
+
+  if [[ "${SECURITYRELEASE}" = true ]]; then
+    if [[ ! -d "${BASEDIR}/hadoop-common-project/hadoop-common/src/site/markdown/release/${HADOOP_VERSION}" ]]; then
+      hadoop_error "ERROR: ${BASEDIR}/hadoop-common-project/hadoop-common/src/site/markdown/release/${HADOOP_VERSION} does not exist."
+      hadoop_error "ERROR: This directory and its contents are required to be manually created for a security release."
+      exit 1
+    fi
+  fi
 }
 
 function dockermode
@@ -523,7 +537,7 @@ function makearelease
   big_console_header "Maven Build and Install"
 
   if [[ "${SIGN}" = true ]]; then
-    signflags=("-Psign" "-Dgpg.useagent=true" -Dgpg.executable="${GPG}")
+    signflags=("-Psign" "-Dgpg.useagent=true" "-Dgpg.executable=${GPG}")
   fi
 
   # Create SRC and BIN tarballs for release,
@@ -534,6 +548,14 @@ function makearelease
       "${signflags[@]}" \
       -DskipTests -Dtar $(hadoop_native_flags)
 
+  if [[ "${SECURITYRELEASE}" = true ]]; then
+    DOCFLAGS="-Pdocs"
+    hadoop_error "WARNING: Skipping automatic changelog and release notes generation due to --security"
+  else
+    DOCFLAGS="-Preleasedocs,docs"
+  fi
+
+
   # Create site for release
   # we need to do install again so that jdiff and
   # a few other things get registered in the maven
@@ -542,7 +564,8 @@ function makearelease
     "${MVN}" "${MVN_ARGS[@]}" install \
       site site:stage \
       -DskipTests \
-      -Pdist,src,releasedocs,docs
+      -Pdist,src \
+      "${DOCFLAGS}"
 
   big_console_header "Staging the release"
 
@@ -586,6 +609,7 @@ function makearelease
 function signartifacts
 {
   declare i
+  declare ret
 
   if [[ "${SIGN}" = false ]]; then
     for i in ${ARTIFACTS_DIR}/*; do
@@ -612,7 +636,8 @@ function signartifacts
     ${GPG} --verify --trustdb "${BASEDIR}/target/testkeysdb" \
       "${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz.asc" \
         "${ARTIFACTS_DIR}/hadoop-${HADOOP_VERSION}${RC_LABEL}.tar.gz"
-    if [[ $? != 0 ]]; then
+    ret=$?
+    if [[ ${ret} != 0 ]]; then
       hadoop_error "ERROR: GPG key is not present in ${PUBKEYFILE}."
       hadoop_error "ERROR: This MUST be fixed. Exiting."
       exit 1
@@ -641,6 +666,7 @@ if [[ "${INDOCKER}" = true || "${DOCKERRAN}" = false ]]; then
   startgpgagent
 
   makearelease
+  releaseret=$?
 
   signartifacts
 
@@ -651,7 +677,7 @@ if [[ "${INDOCKER}" = true ]]; then
   exit $?
 fi
 
-if [[ $? == 0 ]]; then
+if [[ ${releaseret} == 0 ]]; then
   echo
   echo "Congratulations, you have successfully built the release"
   echo "artifacts for Apache Hadoop ${HADOOP_VERSION}${RC_LABEL}"

+ 2 - 2
hadoop-assemblies/pom.xml

@@ -23,11 +23,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 

+ 1 - 1
hadoop-build-tools/pom.xml

@@ -18,7 +18,7 @@
   <parent>
     <artifactId>hadoop-main</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-build-tools</artifactId>

+ 2 - 2
hadoop-client-modules/hadoop-client-api/pom.xml

@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-   <version>3.0.0-alpha4-SNAPSHOT</version>
+   <version>3.0.0-beta1-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
 </parent>
   <artifactId>hadoop-client-api</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

+ 2 - 2
hadoop-client-modules/hadoop-client-check-invariants/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-invariants</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <description>Enforces our invariants for the api and runtime client modules.</description>

+ 2 - 2
hadoop-client-modules/hadoop-client-check-test-invariants/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-check-test-invariants</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>pom</packaging>
 
   <description>Enforces our invariants for the testing client modules.</description>

+ 2 - 2
hadoop-client-modules/hadoop-client-integration-tests/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-integration-tests</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
 
   <description>Checks that we can use the generated artifacts</description>
   <name>Apache Hadoop Client Packaging Integration Tests</name>

+ 2 - 2
hadoop-client-modules/hadoop-client-minicluster/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-minicluster</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Minicluster for Clients</description>

+ 2 - 2
hadoop-client-modules/hadoop-client-runtime/pom.xml

@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project</artifactId>
-   <version>3.0.0-alpha4-SNAPSHOT</version>
+   <version>3.0.0-beta1-SNAPSHOT</version>
    <relativePath>../../hadoop-project</relativePath>
 </parent>
   <artifactId>hadoop-client-runtime</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>

+ 2 - 2
hadoop-client-modules/hadoop-client/pom.xml

@@ -18,11 +18,11 @@
 <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-   <version>3.0.0-alpha4-SNAPSHOT</version>
+   <version>3.0.0-beta1-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
 </parent>
   <artifactId>hadoop-client</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
 
   <description>Apache Hadoop Client aggregation pom with dependencies exposed</description>
   <name>Apache Hadoop Client Aggregator</name>

+ 1 - 1
hadoop-client-modules/pom.xml

@@ -18,7 +18,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-client-modules</artifactId>

+ 2 - 2
hadoop-cloud-storage-project/hadoop-cloud-storage/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Cloud Storage</description>

+ 2 - 2
hadoop-cloud-storage-project/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-cloud-storage-project</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Cloud Storage Project</description>
   <name>Apache Hadoop Cloud Storage Project</name>
   <packaging>pom</packaging>

+ 2 - 2
hadoop-common-project/hadoop-annotations/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-annotations</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>

+ 2 - 2
hadoop-common-project/hadoop-auth/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-auth</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>

+ 2 - 2
hadoop-common-project/hadoop-common/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-common</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Common</description>
   <name>Apache Hadoop Common</name>
   <packaging>jar</packaging>

+ 47 - 16
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.ContentSummary;
 
 /**
  * Get a listing of all files in that match the file patterns.
@@ -54,13 +55,14 @@ class Ls extends FsCommand {
   private static final String OPTION_MTIME = "t";
   private static final String OPTION_ATIME = "u";
   private static final String OPTION_SIZE = "S";
+  private static final String OPTION_ECPOLICY = "e";
 
   public static final String NAME = "ls";
   public static final String USAGE = "[-" + OPTION_PATHONLY + "] [-" +
       OPTION_DIRECTORY + "] [-" + OPTION_HUMAN + "] [-" +
       OPTION_HIDENONPRINTABLE + "] [-" + OPTION_RECURSIVE + "] [-" +
       OPTION_MTIME + "] [-" + OPTION_SIZE + "] [-" + OPTION_REVERSE + "] [-" +
-      OPTION_ATIME + "] [<path> ...]";
+      OPTION_ATIME + "] [-" + OPTION_ECPOLICY +"] [<path> ...]";
 
   public static final String DESCRIPTION =
       "List the contents that match the specified file pattern. If " +
@@ -91,7 +93,9 @@ class Ls extends FsCommand {
           "  Reverse the order of the sort.\n" +
           "  -" + OPTION_ATIME +
           "  Use time of last access instead of modification for\n" +
-          "      display and sorting.";
+          "      display and sorting.\n"+
+          "  -" + OPTION_ECPOLICY +
+          "  Display the erasure coding policy of files and directories.\n";
 
   protected final SimpleDateFormat dateFormat =
     new SimpleDateFormat("yyyy-MM-dd HH:mm");
@@ -104,6 +108,7 @@ class Ls extends FsCommand {
   private boolean orderTime;
   private boolean orderSize;
   private boolean useAtime;
+  private boolean displayECPolicy;
   private Comparator<PathData> orderComparator;
 
   protected boolean humanReadable = false;
@@ -129,7 +134,7 @@ class Ls extends FsCommand {
     CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE,
         OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN,
         OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE,
-        OPTION_MTIME, OPTION_SIZE, OPTION_ATIME);
+        OPTION_MTIME, OPTION_SIZE, OPTION_ATIME, OPTION_ECPOLICY);
     cf.parse(args);
     pathOnly = cf.getOpt(OPTION_PATHONLY);
     dirRecurse = !cf.getOpt(OPTION_DIRECTORY);
@@ -140,6 +145,7 @@ class Ls extends FsCommand {
     orderTime = cf.getOpt(OPTION_MTIME);
     orderSize = !orderTime && cf.getOpt(OPTION_SIZE);
     useAtime = cf.getOpt(OPTION_ATIME);
+    displayECPolicy = cf.getOpt(OPTION_ECPOLICY);
     if (args.isEmpty()) args.add(Path.CUR_DIR);
 
     initialiseOrderComparator();
@@ -245,25 +251,42 @@ class Ls extends FsCommand {
       return;
     }
     FileStatus stat = item.stat;
-    String line = String.format(lineFormat,
-        (stat.isDirectory() ? "d" : "-"),
-        stat.getPermission() + (stat.getPermission().getAclBit() ? "+" : " "),
-        (stat.isFile() ? stat.getReplication() : "-"),
-        stat.getOwner(),
-        stat.getGroup(),
-        formatSize(stat.getLen()),
-        dateFormat.format(new Date(isUseAtime()
-            ? stat.getAccessTime()
-            : stat.getModificationTime())),
-        isHideNonPrintable() ? new PrintableString(item.toString()) : item);
-    out.println(line);
+    if (displayECPolicy) {
+      ContentSummary contentSummary = item.fs.getContentSummary(item.path);
+      String line = String.format(lineFormat,
+          (stat.isDirectory() ? "d" : "-"),
+          stat.getPermission() + (stat.getPermission().getAclBit() ? "+" : " "),
+          (stat.isFile() ? stat.getReplication() : "-"),
+          stat.getOwner(),
+          stat.getGroup(),
+          contentSummary.getErasureCodingPolicy(),
+          formatSize(stat.getLen()),
+          dateFormat.format(new Date(isUseAtime()
+              ? stat.getAccessTime()
+              : stat.getModificationTime())),
+          isHideNonPrintable() ? new PrintableString(item.toString()) : item);
+      out.println(line);
+    } else {
+      String line = String.format(lineFormat,
+          (stat.isDirectory() ? "d" : "-"),
+          stat.getPermission() + (stat.getPermission().getAclBit() ? "+" : " "),
+          (stat.isFile() ? stat.getReplication() : "-"),
+          stat.getOwner(),
+          stat.getGroup(),
+          formatSize(stat.getLen()),
+          dateFormat.format(new Date(isUseAtime()
+              ? stat.getAccessTime()
+              : stat.getModificationTime())),
+          isHideNonPrintable() ? new PrintableString(item.toString()) : item);
+      out.println(line);
+    }
   }
 
   /**
    * Compute column widths and rebuild the format string
    * @param items to find the max field width for each column
    */
-  private void adjustColumnWidths(PathData items[]) {
+  private void adjustColumnWidths(PathData items[]) throws IOException {
     for (PathData item : items) {
       FileStatus stat = item.stat;
       maxRepl  = maxLength(maxRepl, stat.getReplication());
@@ -278,6 +301,14 @@ class Ls extends FsCommand {
     // Do not use '%-0s' as a formatting conversion, since it will throw a
     // a MissingFormatWidthException if it is used in String.format().
     // http://docs.oracle.com/javase/1.5.0/docs/api/java/util/Formatter.html#intFlags
+    if(displayECPolicy){
+      int maxEC=0;
+      for (PathData item : items) {
+          ContentSummary contentSummary = item.fs.getContentSummary(item.path);
+          maxEC=maxLength(maxEC,contentSummary.getErasureCodingPolicy().length());
+      }
+      fmt.append(" %"+maxEC+"s ");
+    }
     fmt.append((maxOwner > 0) ? "%-" + maxOwner + "s " : "%s");
     fmt.append((maxGroup > 0) ? "%-" + maxGroup + "s " : "%s");
     fmt.append("%"  + maxLen   + "s ");

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureEncodingStep.java

@@ -62,6 +62,6 @@ public class ErasureEncodingStep implements ErasureCodingStep {
 
   @Override
   public void finish() {
-    rawEncoder.release();
+    // do nothing
   }
 }

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java

@@ -45,7 +45,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 @InterfaceStability.Unstable
 public class NetworkTopology {
   public final static String DEFAULT_RACK = "/default-rack";
-  public final static int DEFAULT_HOST_LEVEL = 2;
   public static final Logger LOG =
       LoggerFactory.getLogger(NetworkTopology.class);
 

+ 2 - 3
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_decoder.c

@@ -31,7 +31,7 @@
 typedef struct _XOREncoder {
   IsalCoder isalCoder;
   unsigned char* inputs[MMAX];
-  unsigned char* outputs[1];
+  unsigned char* outputs[KMAX];
 } XORDecoder;
 
 JNIEXPORT void JNICALL
@@ -58,8 +58,7 @@ Java_org_apache_hadoop_io_erasurecode_rawcoder_NativeXORRawDecoder_decodeImpl(
   numParityUnits = ((IsalCoder*)xorDecoder)->numParityUnits;
   chunkSize = (int)dataLen;
 
-  getInputs(env, inputs, inputOffsets, xorDecoder->inputs,
-                                               numDataUnits + numParityUnits);
+  getInputs(env, inputs, inputOffsets, xorDecoder->inputs, numDataUnits);
   getOutputs(env, outputs, outputOffsets, xorDecoder->outputs, numParityUnits);
 
   for (i = 0; i < numDataUnits + numParityUnits; i++) {

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/erasurecode/jni_xor_encoder.c

@@ -31,7 +31,7 @@
 typedef struct _XOREncoder {
   IsalCoder isalCoder;
   unsigned char* inputs[MMAX];
-  unsigned char* outputs[1];
+  unsigned char* outputs[KMAX];
 } XOREncoder;
 
 JNIEXPORT void JNICALL

+ 3 - 1
hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md

@@ -410,7 +410,7 @@ Return usage output.
 ls
 ----
 
-Usage: `hadoop fs -ls [-C] [-d] [-h] [-q] [-R] [-t] [-S] [-r] [-u] <args> `
+Usage: `hadoop fs -ls [-C] [-d] [-h] [-q] [-R] [-t] [-S] [-r] [-u] [-e] <args> `
 
 Options:
 
@@ -423,6 +423,7 @@ Options:
 * -S: Sort output by file size.
 * -r: Reverse the sort order.
 * -u: Use access time rather than modification time for display and sorting.  
+* -e: Display the erasure coding policy of files and directories only.
 
 For a file ls returns stat on the file with the following format:
 
@@ -437,6 +438,7 @@ Files within a directory are order by filename by default.
 Example:
 
 * `hadoop fs -ls /user/hadoop/file1`
+* `hadoop fs -ls -e /ecdir`
 
 Exit Code:
 

+ 11 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCodecRawCoderMapping.java

@@ -18,11 +18,13 @@
 package org.apache.hadoop.io.erasurecode;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawEncoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
-import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RSLegacyRawDecoder;
-import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RSLegacyRawEncoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RSRawErasureCoderFactory;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
@@ -55,10 +57,15 @@ public class TestCodecRawCoderMapping {
     // should return default raw coder of rs codec
     RawErasureEncoder encoder = CodecUtil.createRawEncoder(
         conf, ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
-    Assert.assertTrue(encoder instanceof RSRawEncoder);
     RawErasureDecoder decoder = CodecUtil.createRawDecoder(
         conf, ErasureCodeConstants.RS_CODEC_NAME, coderOptions);
-    Assert.assertTrue(decoder instanceof RSRawDecoder);
+    if (ErasureCodeNative.isNativeCodeLoaded()) {
+      Assert.assertTrue(encoder instanceof NativeRSRawEncoder);
+      Assert.assertTrue(decoder instanceof NativeRSRawDecoder);
+    } else {
+      Assert.assertTrue(encoder instanceof RSRawEncoder);
+      Assert.assertTrue(decoder instanceof RSRawDecoder);
+    }
 
     // should return default raw coder of rs-legacy codec
     encoder = CodecUtil.createRawEncoder(conf,

+ 5 - 1
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -54,7 +54,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-ls \[-C\] \[-d\] \[-h\] \[-q\] \[-R\] \[-t\] \[-S\] \[-r\] \[-u\] \[&lt;path&gt; \.\.\.\] :( |\t)*</expected-output>
+          <expected-output>^-ls \[-C\] \[-d\] \[-h\] \[-q\] \[-R\] \[-t\] \[-S\] \[-r\] \[-u\] \[-e\] \[&lt;path&gt; \.\.\.\] :( |\t)*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -136,6 +136,10 @@
           <type>RegexpComparator</type>
           <expected-output>^( |\t)*display and sorting\.</expected-output>
         </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*-e\s+Display the erasure coding policy of files and directories\.</expected-output>
+        </comparator>
       </comparators>
     </test>
 

+ 2 - 2
hadoop-common-project/hadoop-kms/pom.xml

@@ -22,11 +22,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-kms</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop KMS</name>

+ 4 - 1
hadoop-common-project/hadoop-kms/src/main/conf/kms-log4j.properties

@@ -34,4 +34,7 @@ log4j.additivity.kms-audit=false
 
 log4j.rootLogger=INFO, kms
 log4j.logger.org.apache.hadoop=INFO
-log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
+log4j.logger.com.sun.jersey.server.wadl.generators.WadlGeneratorJAXBGrammarGenerator=OFF
+# make zookeeper log level an explicit config, and not changing with rootLogger.
+log4j.logger.org.apache.zookeeper=INFO
+log4j.logger.org.apache.curator=INFO

+ 2 - 2
hadoop-common-project/hadoop-minikdc/pom.xml

@@ -18,12 +18,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-minikdc</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop MiniKDC</description>
   <name>Apache Hadoop MiniKDC</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-nfs/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-nfs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop NFS</name>

+ 2 - 2
hadoop-common-project/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-common-project</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Common Project</description>
   <name>Apache Hadoop Common Project</name>
   <packaging>pom</packaging>

+ 2 - 2
hadoop-dist/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-dist</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Distribution</description>
   <name>Apache Hadoop Distribution</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-client/pom.xml

@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-client</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Client</description>
   <name>Apache Hadoop HDFS Client</name>
   <packaging>jar</packaging>

+ 14 - 5
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -2783,6 +2783,18 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     namenode.removeErasureCodingPolicy(ecPolicyName);
   }
 
+  public void enableErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    checkOpen();
+    namenode.enableErasureCodingPolicy(ecPolicyName);
+  }
+
+  public void disableErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    checkOpen();
+    namenode.disableErasureCodingPolicy(ecPolicyName);
+  }
+
   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
     checkOpen();
     return new DFSInotifyEventInputStream(namenode, tracer);
@@ -2871,12 +2883,9 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     }
     synchronized (DFSClient.class) {
       if (STRIPED_READ_THREAD_POOL == null) {
-        // Only after thread pool is fully constructed then save it to
-        // volatile field.
-        ThreadPoolExecutor threadPool = DFSUtilClient.getThreadPoolExecutor(1,
+        STRIPED_READ_THREAD_POOL = DFSUtilClient.getThreadPoolExecutor(1,
             numThreads, 60, "StripedRead-", true);
-        threadPool.allowCoreThreadTimeOut(true);
-        STRIPED_READ_THREAD_POOL = threadPool;
+        STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
       }
     }
   }

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -585,8 +585,9 @@ public class DFSInputStream extends FSInputStream
           fetchBlockAt(target);
         } else {
           connectFailedOnce = true;
-          DFSClient.LOG.warn("Failed to connect to " + targetAddr + " for block"
-              + ", add to deadNodes and continue. " + ex, ex);
+          DFSClient.LOG.warn("Failed to connect to {} for block {}, " +
+              "add to deadNodes and continue. ", targetAddr,
+              targetBlock.getBlock(), ex);
           // Put chosen node into dead list, continue
           addToDeadNodes(chosenNode);
         }

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -2618,6 +2618,28 @@ public class DistributedFileSystem extends FileSystem {
     dfs.removeErasureCodingPolicy(ecPolicyName);
   }
 
+  /**
+   * Enable erasure coding policy.
+   *
+   * @param ecPolicyName The name of the policy to be enabled.
+   * @throws IOException
+   */
+  public void enableErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    dfs.enableErasureCodingPolicy(ecPolicyName);
+  }
+
+  /**
+   * Disable erasure coding policy.
+   *
+   * @param ecPolicyName The name of the policy to be disabled.
+   * @throws IOException
+   */
+  public void disableErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    dfs.disableErasureCodingPolicy(ecPolicyName);
+  }
+
   /**
    * Unset the erasure coding policy from the source path.
    *

+ 22 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java

@@ -548,6 +548,28 @@ public class HdfsAdmin {
     dfs.removeErasureCodingPolicy(ecPolicyName);
   }
 
+  /**
+   * Enable erasure coding policy.
+   *
+   * @param ecPolicyName The name of the policy to be enabled.
+   * @throws IOException
+   */
+  public void enableErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    dfs.enableErasureCodingPolicy(ecPolicyName);
+  }
+
+  /**
+   * Disable erasure coding policy.
+   *
+   * @param ecPolicyName The name of the policy to be disabled.
+   * @throws IOException
+   */
+  public void disableErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    dfs.disableErasureCodingPolicy(ecPolicyName);
+  }
+
   private void provisionEZTrash(Path path) throws IOException {
     // make sure the path is an EZ
     EncryptionZone ez = dfs.getEZForPath(path);
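
As an aside (not part of the commit), a minimal Java sketch of how the two HdfsAdmin methods added here might be driven from client code; the NameNode URI and the policy name below are placeholders, not values taken from this change.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class ToggleEcPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode URI; substitute your cluster's fs.defaultFS.
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);

    // Example policy name; use one that is actually defined on your cluster.
    String policy = "RS-6-3-1024k";
    admin.enableErasureCodingPolicy(policy);   // allow the policy to be set on paths
    // ... later, to stop new files from using it:
    admin.disableErasureCodingPolicy(policy);
  }
}
```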

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -1570,6 +1570,23 @@ public interface ClientProtocol {
   @AtMostOnce
   void removeErasureCodingPolicy(String ecPolicyName) throws IOException;
 
+  /**
+   * Enable erasure coding policy.
+   * @param ecPolicyName The name of the policy to be enabled.
+   * @throws IOException
+   */
+  @AtMostOnce
+  void enableErasureCodingPolicy(String ecPolicyName) throws IOException;
+
+  /**
+   * Disable erasure coding policy.
+   * @param ecPolicyName The name of the policy to be disabled.
+   * @throws IOException
+   */
+  @AtMostOnce
+  void disableErasureCodingPolicy(String ecPolicyName) throws IOException;
+
+
   /**
    * Get the erasure coding policies loaded in Namenode.
    *

+ 30 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -186,6 +186,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodin
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingCodecsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto;
@@ -1708,6 +1710,34 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
   }
 
+  @Override
+  public void enableErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    EnableErasureCodingPolicyRequestProto.Builder builder =
+        EnableErasureCodingPolicyRequestProto.newBuilder();
+    builder.setEcPolicyName(ecPolicyName);
+    EnableErasureCodingPolicyRequestProto req = builder.build();
+    try {
+      rpcProxy.enableErasureCodingPolicy(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
+  @Override
+  public void disableErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    DisableErasureCodingPolicyRequestProto.Builder builder =
+        DisableErasureCodingPolicyRequestProto.newBuilder();
+    builder.setEcPolicyName(ecPolicyName);
+    DisableErasureCodingPolicyRequestProto req = builder.build();
+    try {
+      rpcProxy.disableErasureCodingPolicy(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   @Override
   public ErasureCodingPolicy[] getErasureCodingPolicies() throws IOException {
     try {

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java

@@ -186,6 +186,7 @@ import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.util.ChunkedArrayList;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.LimitInputStream;
 
@@ -1455,7 +1456,7 @@ public class PBHelperClient {
     String toSnapshot = reportProto.getToSnapshot();
     List<SnapshotDiffReportEntryProto> list = reportProto
         .getDiffReportEntriesList();
-    List<DiffReportEntry> entries = new ArrayList<>();
+    List<DiffReportEntry> entries = new ChunkedArrayList<>();
     for (SnapshotDiffReportEntryProto entryProto : list) {
       DiffReportEntry entry = convert(entryProto);
       if (entry != null)
@@ -2392,7 +2393,7 @@ public class PBHelperClient {
       return null;
     }
     List<DiffReportEntry> entries = report.getDiffList();
-    List<SnapshotDiffReportEntryProto> entryProtos = new ArrayList<>();
+    List<SnapshotDiffReportEntryProto> entryProtos = new ChunkedArrayList<>();
     for (DiffReportEntry entry : entries) {
       SnapshotDiffReportEntryProto entryProto = convert(entry);
       if (entryProto != null)

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowDiskReports.java

@@ -101,9 +101,8 @@ public final class SlowDiskReports {
     }
 
     boolean areEqual;
-    for (Map.Entry<String, Map<DiskOp, Double>> entry : this.slowDisks
-        .entrySet()) {
-      if (!entry.getValue().equals(that.slowDisks.get(entry.getKey()))) {
+    for (String disk : this.slowDisks.keySet()) {
+      if (!this.slowDisks.get(disk).equals(that.slowDisks.get(disk))) {
         return false;
       }
     }

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto

@@ -957,6 +957,10 @@ service ClientNamenodeProtocol {
       returns(AddErasureCodingPoliciesResponseProto);
   rpc removeErasureCodingPolicy(RemoveErasureCodingPolicyRequestProto)
       returns(RemoveErasureCodingPolicyResponseProto);
+  rpc enableErasureCodingPolicy(EnableErasureCodingPolicyRequestProto)
+      returns(EnableErasureCodingPolicyResponseProto);
+  rpc disableErasureCodingPolicy(DisableErasureCodingPolicyRequestProto)
+      returns(DisableErasureCodingPolicyResponseProto);
   rpc getErasureCodingPolicy(GetErasureCodingPolicyRequestProto)
       returns(GetErasureCodingPolicyResponseProto);
   rpc getErasureCodingCodecs(GetErasureCodingCodecsRequestProto)

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/erasurecoding.proto

@@ -68,6 +68,20 @@ message RemoveErasureCodingPolicyRequestProto {
 message RemoveErasureCodingPolicyResponseProto {
 }
 
+message EnableErasureCodingPolicyRequestProto {
+  required string ecPolicyName = 1;
+}
+
+message EnableErasureCodingPolicyResponseProto {
+}
+
+message DisableErasureCodingPolicyRequestProto {
+  required string ecPolicyName = 1;
+}
+
+message DisableErasureCodingPolicyResponseProto {
+}
+
 message UnsetErasureCodingPolicyRequestProto {
   required string src = 1;
 }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml

@@ -22,11 +22,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-httpfs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop HttpFS</name>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml

@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-native-client</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Native Client</description>
   <name>Apache Hadoop HDFS Native Client</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml

@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-nfs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop HDFS-NFS</description>
   <name>Apache Hadoop HDFS-NFS</name>
   <packaging>jar</packaging>

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml

@@ -252,9 +252,4 @@
         <Class name="org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture" />
         <Bug pattern="NS_DANGEROUS_NON_SHORT_CIRCUIT" />
     </Match>
-    <Match>
-        <Class name="org.apache.hadoop.hdfs.server.namenode.NNUpgradeUtil$1" />
-        <Method name="visitFile" />
-        <Bug pattern="NP_NULL_ON_SOME_PATH_FROM_RETURN_VALUE" />
-    </Match>
  </FindBugsFilter>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project-dist</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project-dist</relativePath>
   </parent>
   <artifactId>hadoop-hdfs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop HDFS</description>
   <name>Apache Hadoop HDFS</name>
   <packaging>jar</packaging>

+ 28 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -230,6 +230,10 @@ import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodin
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.GetErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.RemoveErasureCodingPolicyResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.EnableErasureCodingPolicyResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.DisableErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.SetErasureCodingPolicyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.UnsetErasureCodingPolicyRequestProto;
@@ -1707,6 +1711,30 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
   }
 
+  @Override
+  public EnableErasureCodingPolicyResponseProto enableErasureCodingPolicy(
+      RpcController controller, EnableErasureCodingPolicyRequestProto request)
+      throws ServiceException {
+    try {
+      server.enableErasureCodingPolicy(request.getEcPolicyName());
+      return EnableErasureCodingPolicyResponseProto.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  @Override
+  public DisableErasureCodingPolicyResponseProto disableErasureCodingPolicy(
+      RpcController controller, DisableErasureCodingPolicyRequestProto request)
+      throws ServiceException {
+    try {
+      server.disableErasureCodingPolicy(request.getEcPolicyName());
+      return DisableErasureCodingPolicyResponseProto.newBuilder().build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
   @Override
   public GetErasureCodingPolicyResponseProto getErasureCodingPolicy(RpcController controller,
       GetErasureCodingPolicyRequestProto request) throws ServiceException {

+ 6 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java

@@ -299,18 +299,14 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
         return file.isDirectory();
       }
     });
-
-    if (journalDirs != null) {
-      for (File journalDir : journalDirs) {
-        String jid = journalDir.getName();
-        if (!status.containsKey(jid)) {
-          Map<String, String> jMap = new HashMap<String, String>();
-          jMap.put("Formatted", "true");
-          status.put(jid, jMap);
-        }
+    for (File journalDir : journalDirs) {
+      String jid = journalDir.getName();
+      if (!status.containsKey(jid)) {
+        Map<String, String> jMap = new HashMap<String, String>();
+        jMap.put("Formatted", "true");
+        status.put(jid, jMap);
       }
     }
-
     return JSON.toString(status);
   }
   

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java

@@ -112,6 +112,7 @@ public class BlockTokenSecretManager extends
    * @param blockPoolId block pool ID
    * @param encryptionAlgorithm encryption algorithm to use
    * @param numNNs number of namenodes possible
+   * @param useProto should we use new protobuf style tokens
    */
   public BlockTokenSecretManager(long keyUpdateInterval,
       long tokenLifetime, int nnIndex, int numNNs,  String blockPoolId,

+ 9 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1851,7 +1851,7 @@ public class BlockManager implements BlockStatsMXBean {
         (pendingReplicaNum > 0 || isPlacementPolicySatisfied(block));
   }
 
-  private BlockReconstructionWork scheduleReconstruction(BlockInfo block,
+  BlockReconstructionWork scheduleReconstruction(BlockInfo block,
       int priority) {
     // skip abandoned block or block reopened for append
     if (block.isDeleted() || !block.isCompleteOrCommitted()) {
@@ -1873,6 +1873,7 @@ public class BlockManager implements BlockStatsMXBean {
     if(srcNodes == null || srcNodes.length == 0) {
       // block can not be reconstructed from any node
       LOG.debug("Block {} cannot be reconstructed from any node", block);
+      NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
       return null;
     }
 
@@ -1885,6 +1886,7 @@ public class BlockManager implements BlockStatsMXBean {
       neededReconstruction.remove(block, priority);
       blockLog.debug("BLOCK* Removing {} from neededReconstruction as" +
           " it has enough replicas", block);
+      NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
       return null;
     }
 
@@ -1900,6 +1902,7 @@ public class BlockManager implements BlockStatsMXBean {
     if (block.isStriped()) {
       if (pendingNum > 0) {
         // Wait the previous reconstruction to finish.
+        NameNode.getNameNodeMetrics().incNumTimesReReplicationNotScheduled();
         return null;
       }
 
@@ -3727,8 +3730,8 @@ public class BlockManager implements BlockStatsMXBean {
    * The given node is reporting that it received a certain block.
    */
   @VisibleForTesting
-  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
-      throws IOException {
+  public void addBlock(DatanodeStorageInfo storageInfo, Block block,
+      String delHint) throws IOException {
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     // Decrement number of blocks scheduled to this datanode.
     // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
@@ -3751,7 +3754,9 @@ public class BlockManager implements BlockStatsMXBean {
     BlockInfo storedBlock = getStoredBlock(block);
     if (storedBlock != null &&
         block.getGenerationStamp() == storedBlock.getGenerationStamp()) {
-      pendingReconstruction.decrement(storedBlock, node);
+      if (pendingReconstruction.decrement(storedBlock, node)) {
+        NameNode.getNameNodeMetrics().incSuccessfulReReplications();
+      }
     }
     processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
         delHintNode);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -644,14 +644,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   /**
-   * The number of work items that are pending to be replicated
+   * The number of work items that are pending to be replicated.
    */
   int getNumberOfBlocksToBeReplicated() {
     return pendingReplicationWithoutTargets + replicateBlocks.size();
   }
 
   /**
-   * The number of work items that are pending to be replicated
+   * The number of work items that are pending to be reconstructed.
    */
   @VisibleForTesting
   public int getNumberOfBlocksToBeErasureCoded() {

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -1661,6 +1661,7 @@ public class DatanodeManager {
     if (pendingList != null) {
       cmds.add(new BlockCommand(DatanodeProtocol.DNA_TRANSFER, blockPoolId,
           pendingList));
+      maxTransfers -= pendingList.size();
     }
     // check pending erasure coding tasks
     List<BlockECReconstructionInfo> pendingECList = nodeinfo
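
For context on the hunk above: the heartbeat now charges scheduled replication work against maxTransfers before handing the remainder to erasure-coded reconstruction (the new TestDatanodeManager case later in this merge verifies the split). A minimal, self-contained sketch of that budgeting logic, in plain Java rather than the actual DatanodeManager API:

    import java.util.List;

    final class TransferBudgetSketch {
      /**
       * Mirror of the budgeting order in the hunk above: replication commands
       * consume transfer slots first, erasure-coded reconstruction gets the rest.
       * Names and types are illustrative, not the DatanodeManager API.
       */
      static int slotsLeftForEcReconstruction(int maxTransfers,
          List<?> pendingReplication) {
        int budget = maxTransfers;
        if (pendingReplication != null) {
          budget -= pendingReplication.size();
        }
        return Math.max(0, budget);
      }
    }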

+ 7 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReconstructionBlocks.java

@@ -30,6 +30,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
 
@@ -97,8 +98,10 @@ class PendingReconstructionBlocks {
    * for this block.
    *
    * @param dn The DataNode that finishes the reconstruction
+   * @return true if the block's pending count dropped to 0 and it was removed.
    */
-  void decrement(BlockInfo block, DatanodeDescriptor dn) {
+  boolean decrement(BlockInfo block, DatanodeDescriptor dn) {
+    boolean removed = false;
     synchronized (pendingReconstructions) {
       PendingBlockInfo found = pendingReconstructions.get(block);
       if (found != null) {
@@ -106,9 +109,11 @@ class PendingReconstructionBlocks {
         found.decrementReplicas(dn);
         if (found.getNumReplicas() <= 0) {
           pendingReconstructions.remove(block);
+          removed = true;
         }
       }
     }
+    return removed;
   }
 
   /**
@@ -263,6 +268,7 @@ class PendingReconstructionBlocks {
               timedOutItems.add(block);
             }
             LOG.warn("PendingReconstructionMonitor timed out " + block);
+            NameNode.getNameNodeMetrics().incTimeoutReReplications();
             iter.remove();
           }
         }
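
The decrement() change turns a fire-and-forget call into one that reports whether the last pending replica was accounted for, so the caller (see the BlockManager hunk above) can bump SuccessfulReReplications exactly once per block. A minimal sketch of that caller-side pattern, using plain Java stand-ins for the Hadoop types:

    import java.util.concurrent.atomic.AtomicLong;

    final class PendingDecrementSketch {
      private int pendingReplicas = 2;               // stand-in for PendingBlockInfo
      private final AtomicLong successfulReReplications = new AtomicLong();

      /** Returns true only when the last pending replica has been accounted for. */
      synchronized boolean decrement() {
        pendingReplicas--;
        return pendingReplicas <= 0;                 // "removed from the pending map"
      }

      /** Caller-side pattern: count a successful re-replication once per block. */
      void onReplicaReceived() {
        if (decrement()) {
          successfulReReplications.incrementAndGet();
        }
      }
    }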

+ 1 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java

@@ -188,10 +188,8 @@ public interface HdfsServerConstants {
         return NamenodeRole.NAMENODE;
       }
     }
-
+    
     public void setClusterId(String cid) {
-      Preconditions.checkState(this == UPGRADE || this == UPGRADEONLY
-          || this == FORMAT);
       clusterId = cid;
     }
 
@@ -216,7 +214,6 @@ public interface HdfsServerConstants {
     }
 
     public void setForce(int force) {
-      Preconditions.checkState(this == RECOVER);
       this.force = force;
     }
     
@@ -229,7 +226,6 @@ public interface HdfsServerConstants {
     }
     
     public void setForceFormat(boolean force) {
-      Preconditions.checkState(this == FORMAT);
       isForceFormat = force;
     }
     
@@ -238,7 +234,6 @@ public interface HdfsServerConstants {
     }
     
     public void setInteractiveFormat(boolean interactive) {
-      Preconditions.checkState(this == FORMAT);
       isInteractiveFormat = interactive;
     }
     

+ 4 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -1336,14 +1336,10 @@ public class DataStorage extends Storage {
           return name.startsWith(BLOCK_SUBDIR_PREFIX);
         }
       });
-
-    if (otherNames != null) {
-      for (int i = 0; i < otherNames.length; i++) {
-        linkBlocksHelper(new File(from, otherNames[i]),
-            new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout,
-            blockRoot, idBasedLayoutSingleLinks);
-      }
-    }
+    for(int i = 0; i < otherNames.length; i++)
+      linkBlocksHelper(new File(from, otherNames[i]),
+          new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout,
+          blockRoot, idBasedLayoutSingleLinks);
   }
 
   /**

+ 12 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -695,11 +695,18 @@ class DataXceiver extends Receiver implements Runnable {
     if (targetStorageTypes.length > 0) {
       System.arraycopy(targetStorageTypes, 0, storageTypes, 1, nst);
     }
-    int nsi = targetStorageIds.length;
-    String[] storageIds = new String[nsi + 1];
-    storageIds[0] = storageId;
-    if (targetStorageTypes.length > 0) {
-      System.arraycopy(targetStorageIds, 0, storageIds, 1, nsi);
+
+    // To support older clients that don't send storage IDs, keep the array empty
+    final int nsi = targetStorageIds.length;
+    final String[] storageIds;
+    if (nsi > 0) {
+      storageIds = new String[nsi + 1];
+      storageIds[0] = storageId;
+      if (targetStorageTypes.length > 0) {
+        System.arraycopy(targetStorageIds, 0, storageIds, 1, nsi);
+      }
+    } else {
+      storageIds = new String[0];
     }
     checkAccess(replyOut, isClient, block, blockToken, Op.WRITE_BLOCK,
         BlockTokenIdentifier.AccessMode.WRITE,
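
The intent of the DataXceiver hunk above is backwards compatibility: when the client sent no target storage IDs, the datanode now forwards an empty array instead of fabricating a one-element one, so block tokens issued without storage IDs (older clients) still pass checkAccess. A self-contained sketch of the array construction, simplified from the diff and not the real DataXceiver signature:

    final class StorageIdCompatSketch {
      /**
       * Illustrative only: build the storage-ID array forwarded with a write,
       * keeping it empty when the client sent none so older clients'
       * block tokens still pass the access check.
       */
      static String[] buildStorageIds(String localStorageId,
          String[] targetStorageIds) {
        if (targetStorageIds == null || targetStorageIds.length == 0) {
          return new String[0];
        }
        String[] ids = new String[targetStorageIds.length + 1];
        ids[0] = localStorageId;    // this datanode's own storage goes first
        System.arraycopy(targetStorageIds, 0, ids, 1, targetStorageIds.length);
        return ids;
      }
    }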

+ 8 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java

@@ -1011,15 +1011,19 @@ public class DiskBalancer {
         return;
       }
 
+      if (source.isTransientStorage() || dest.isTransientStorage()) {
+        final String errMsg = "Disk Balancer - Unable to support " +
+                "transient storage type.";
+        LOG.error(errMsg);
+        item.setErrMsg(errMsg);
+        return;
+      }
+
       List<FsVolumeSpi.BlockIterator> poolIters = new LinkedList<>();
       startTime = Time.now();
       item.setStartTime(startTime);
       secondsElapsed = 0;
 
-      if (source.isTransientStorage() || dest.isTransientStorage()) {
-        return;
-      }
-
       try {
         openPoolIters(source, poolIters);
         if (poolIters.size() == 0) {

+ 56 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java

@@ -212,6 +212,10 @@ public final class ErasureCodingPolicyManager {
     // This is a placeholder for HDFS-7337.
   }
 
+  /**
+   * Add an erasure coding policy.
+   * @return the added policy
+   */
   public synchronized ErasureCodingPolicy addPolicy(ErasureCodingPolicy policy)
       throws IllegalECPolicyException {
     if (!CodecUtil.hasCodec(policy.getCodecName())) {
@@ -251,6 +255,9 @@ public final class ErasureCodingPolicyManager {
     return (byte) (currentId + 1);
   }
 
+  /**
+   * Remove a user-defined erasure coding policy by policyName.
+   */
   public synchronized void removePolicy(String name) {
     if (SystemErasureCodingPolicies.getByName(name) != null) {
       throw new IllegalArgumentException("System erasure coding policy " +
@@ -268,4 +275,53 @@ public final class ErasureCodingPolicyManager {
   public List<ErasureCodingPolicy> getRemovedPolicies() {
     return removedPoliciesByName.values().stream().collect(Collectors.toList());
   }
+
+  /**
+   * Disable an erasure coding policy by policyName.
+   */
+  public synchronized void disablePolicy(String name) {
+    ErasureCodingPolicy sysEcPolicy = SystemErasureCodingPolicies
+        .getByName(name);
+    ErasureCodingPolicy userEcPolicy = userPoliciesByName.get(name);
+    LOG.info("Disable the erasure coding policy " + name);
+    if (sysEcPolicy == null &&
+        userEcPolicy == null) {
+      throw new IllegalArgumentException("The policy name " +
+          name + " does not exist");
+    }
+
+    if (sysEcPolicy != null) {
+      enabledPoliciesByName.remove(name);
+      removedPoliciesByName.put(name, sysEcPolicy);
+    }
+    if (userEcPolicy != null) {
+      enabledPoliciesByName.remove(name);
+      removedPoliciesByName.put(name, userEcPolicy);
+    }
+  }
+
+  /**
+   * Enable an erasure coding policy by policyName.
+   */
+  public synchronized void enablePolicy(String name) {
+    ErasureCodingPolicy sysEcPolicy = SystemErasureCodingPolicies
+        .getByName(name);
+    ErasureCodingPolicy userEcPolicy = userPoliciesByName.get(name);
+    LOG.info("Enable the erasure coding policy " + name);
+    if (sysEcPolicy == null &&
+        userEcPolicy == null) {
+      throw new IllegalArgumentException("The policy name " +
+          name + " does not exist");
+    }
+
+    if (sysEcPolicy != null) {
+      enabledPoliciesByName.put(name, sysEcPolicy);
+      removedPoliciesByName.remove(name);
+    }
+    if (userEcPolicy != null) {
+      enabledPoliciesByName.put(name, userEcPolicy);
+      removedPoliciesByName.remove(name);
+    }
+  }
+
 }

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java

@@ -231,6 +231,18 @@ final class FSDirErasureCodingOp {
     fsn.getErasureCodingPolicyManager().removePolicy(ecPolicyName);
   }
 
+  static void enableErasureCodePolicy(final FSNamesystem fsn,
+      String ecPolicyName) throws IOException {
+    Preconditions.checkNotNull(ecPolicyName);
+    fsn.getErasureCodingPolicyManager().enablePolicy(ecPolicyName);
+  }
+
+  static void disableErasureCodePolicy(final FSNamesystem fsn,
+      String ecPolicyName) throws IOException {
+    Preconditions.checkNotNull(ecPolicyName);
+    fsn.getErasureCodingPolicyManager().disablePolicy(ecPolicyName);
+  }
+
   private static List<XAttr> removeErasureCodingPolicyXAttr(
       final FSNamesystem fsn, final INodesInPath srcIIP) throws IOException {
     FSDirectory fsd = fsn.getFSDirectory();

+ 55 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -7060,6 +7060,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   AddECPolicyResponse[] addECPolicies(ErasureCodingPolicy[] policies)
       throws IOException {
     final String operationName = "addECPolicies";
+    String addECPolicyName = "";
     checkOperation(OperationCategory.WRITE);
     List<AddECPolicyResponse> responses = new ArrayList<>();
     boolean success = false;
@@ -7070,6 +7071,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
         try {
           ErasureCodingPolicy newPolicy =
               FSDirErasureCodingOp.addErasureCodePolicy(this, policy);
+          addECPolicyName = newPolicy.getName();
           responses.add(new AddECPolicyResponse(newPolicy));
         } catch (IllegalECPolicyException e) {
           responses.add(new AddECPolicyResponse(policy, e));
@@ -7082,7 +7084,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
       if (success) {
         getEditLog().logSync();
       }
-      logAuditEvent(success, operationName, null, null, null);
+      logAuditEvent(success, operationName, addECPolicyName, null, null);
     }
   }
 
@@ -7108,6 +7110,58 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     }
   }
 
+  /**
+   * Enable an erasure coding policy.
+   * @param ecPolicyName the name of the policy to be enabled
+   * @throws IOException
+   */
+  void enableErasureCodingPolicy(String ecPolicyName) throws IOException {
+    final String operationName = "enableErasureCodingPolicy";
+    checkOperation(OperationCategory.WRITE);
+    boolean success = false;
+    LOG.info("Enable the erasure coding policy " + ecPolicyName);
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot enable erasure coding policy "
+          + ecPolicyName);
+      FSDirErasureCodingOp.enableErasureCodePolicy(this, ecPolicyName);
+      success = true;
+    } finally {
+      writeUnlock(operationName);
+      if (success) {
+        getEditLog().logSync();
+      }
+      logAuditEvent(success, operationName, ecPolicyName, null, null);
+    }
+  }
+
+  /**
+   * Disable an erasure coding policy.
+   * @param ecPolicyName the name of the policy to be disabled
+   * @throws IOException
+   */
+  void disableErasureCodingPolicy(String ecPolicyName) throws IOException {
+    final String operationName = "disableErasureCodingPolicy";
+    checkOperation(OperationCategory.WRITE);
+    boolean success = false;
+    LOG.info("Disable the erasure coding policy " + ecPolicyName);
+    writeLock();
+    try {
+      checkOperation(OperationCategory.WRITE);
+      checkNameNodeSafeMode("Cannot disable erasure coding policy "
+          + ecPolicyName);
+      FSDirErasureCodingOp.disableErasureCodePolicy(this, ecPolicyName);
+      success = true;
+    } finally {
+      writeUnlock(operationName);
+      if (success) {
+        getEditLog().logSync();
+      }
+      logAuditEvent(success, operationName, ecPolicyName, null, null);
+    }
+  }
+
   /**
    * Unset an erasure coding policy from the given path.
    * @param srcArg  The path of the target directory.

+ 12 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java

@@ -255,27 +255,24 @@ public class NNStorageRetentionManager {
     });
 
     // Check whether there is any work to do.
-    if (filesInStorage != null
-        && filesInStorage.length <= numCheckpointsToRetain) {
+    if (filesInStorage.length <= numCheckpointsToRetain) {
       return;
     }
 
     // Create a sorted list of txids from the file names.
     TreeSet<Long> sortedTxIds = new TreeSet<Long>();
-    if (filesInStorage != null) {
-      for (String fName : filesInStorage) {
-        // Extract the transaction id from the file name.
-        long fTxId;
-        try {
-          fTxId = Long.parseLong(fName.substring(oivImagePrefix.length() + 1));
-        } catch (NumberFormatException nfe) {
-          // This should not happen since we have already filtered it.
-          // Log and continue.
-          LOG.warn("Invalid file name. Skipping " + fName);
-          continue;
-        }
-        sortedTxIds.add(Long.valueOf(fTxId));
+    for (String fName : filesInStorage) {
+      // Extract the transaction id from the file name.
+      long fTxId;
+      try {
+        fTxId = Long.parseLong(fName.substring(oivImagePrefix.length() + 1));
+      } catch (NumberFormatException nfe) {
+        // This should not happen since we have already filtered it.
+        // Log and continue.
+        LOG.warn("Invalid file name. Skipping " + fName);
+        continue;
       }
+      sortedTxIds.add(Long.valueOf(fTxId));
     }
 
     int numFilesToDelete = sortedTxIds.size() - numCheckpointsToRetain;

+ 16 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -2307,6 +2307,22 @@ public class NameNodeRpcServer implements NamenodeProtocols {
     namesystem.removeErasureCodingPolicy(ecPolicyName);
   }
 
+  @Override // ClientProtocol
+  public void enableErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    checkNNStartup();
+    namesystem.checkSuperuserPrivilege();
+    namesystem.enableErasureCodingPolicy(ecPolicyName);
+  }
+
+  @Override // ClientProtocol
+  public void disableErasureCodingPolicy(String ecPolicyName)
+      throws IOException {
+    checkNNStartup();
+    namesystem.checkSuperuserPrivilege();
+    namesystem.disableErasureCodingPolicy(ecPolicyName);
+  }
+
   @Override // ReconfigurationProtocol
   public void startReconfiguration() throws IOException {
     checkNNStartup();

+ 18 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java

@@ -58,6 +58,12 @@ public class NameNodeMetrics {
   @Metric MutableCounterLong createSymlinkOps;
   @Metric MutableCounterLong getLinkTargetOps;
   @Metric MutableCounterLong filesInGetListingOps;
+  @Metric("Number of successful re-replications")
+  MutableCounterLong successfulReReplications;
+  @Metric("Number of times we failed to schedule a block re-replication")
+  MutableCounterLong numTimesReReplicationNotScheduled;
+  @Metric("Number of timed out block re-replications")
+  MutableCounterLong timeoutReReplications;
   @Metric("Number of allowSnapshot operations")
   MutableCounterLong allowSnapshotOps;
   @Metric("Number of disallowSnapshot operations")
@@ -300,6 +306,18 @@ public class NameNodeMetrics {
     transactionsBatchedInSync.incr(count);
   }
 
+  public void incSuccessfulReReplications() {
+    successfulReReplications.incr();
+  }
+
+  public void incNumTimesReReplicationNotScheduled() {
+    numTimesReReplicationNotScheduled.incr();
+  }
+
+  public void incTimeoutReReplications() {
+    timeoutReReplications.incr();
+  }
+
   public void addSync(long elapsed) {
     syncs.add(elapsed);
     for (MutableQuantiles q : syncsQuantiles) {
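
The three counters above surface under the NameNodeActivity metrics record, which is how the TestPendingReconstruction change later in this merge asserts on them. A minimal sketch of reading them from a test, assuming the same MetricsAsserts helpers that test imports:

    import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
    import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

    import org.apache.hadoop.metrics2.MetricsRecordBuilder;

    final class ReReplicationMetricsSketch {
      /** Counter names follow the @Metric field names above, capitalized. */
      static void assertNoReReplicationActivity() {
        MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
        assertCounter("SuccessfulReReplications", 0L, rb);
        assertCounter("NumTimesReReplicationNotScheduled", 0L, rb);
        assertCounter("TimeoutReReplications", 0L, rb);
      }
    }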

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.util.Diff.ListType;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.SignedBytes;
+import org.apache.hadoop.util.ChunkedArrayList;
 
 /**
  * A class describing the difference between snapshots of a snapshottable
@@ -186,7 +187,7 @@ class SnapshotDiffInfo {
    * @return A {@link SnapshotDiffReport} describing the difference
    */
   public SnapshotDiffReport generateReport() {
-    List<DiffReportEntry> diffReportList = new ArrayList<DiffReportEntry>();
+    List<DiffReportEntry> diffReportList = new ChunkedArrayList<>();
     for (Map.Entry<INode,byte[][]> drEntry : diffMap.entrySet()) {
       INode node = drEntry.getKey();
       byte[][] path = drEntry.getValue();
@@ -213,7 +214,7 @@ class SnapshotDiffInfo {
    */
   private List<DiffReportEntry> generateReport(ChildrenDiff dirDiff,
       byte[][] parentPath, boolean fromEarlier, Map<Long, RenameEntry> renameMap) {
-    List<DiffReportEntry> list = new ArrayList<DiffReportEntry>();
+    List<DiffReportEntry> list = new ChunkedArrayList<>();
     List<INode> created = dirDiff.getList(ListType.CREATED);
     List<INode> deleted = dirDiff.getList(ListType.DELETED);
     byte[][] fullPath = new byte[parentPath.length + 1][];
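
Switching these lists to ChunkedArrayList avoids allocating and repeatedly resizing one large contiguous array when a snapshot diff report has a huge number of entries; the new TestSnapshotCommands case below creates enough files to force multiple chunks. A tiny, self-contained illustration of the idea in plain Java (not the Hadoop ChunkedArrayList itself):

    import java.util.ArrayList;
    import java.util.List;

    /** Grows by appending fixed-size chunks instead of resizing one big array. */
    final class ChunkedListSketch<T> {
      private static final int CHUNK_SIZE = 1024;            // illustrative chunk size
      private final List<List<T>> chunks = new ArrayList<>();

      void add(T item) {
        if (chunks.isEmpty()
            || chunks.get(chunks.size() - 1).size() == CHUNK_SIZE) {
          chunks.add(new ArrayList<T>(CHUNK_SIZE));          // start a new chunk
        }
        chunks.get(chunks.size() - 1).add(item);
      }

      int size() {
        int total = 0;
        for (List<T> chunk : chunks) {
          total += chunk.size();
        }
        return total;
      }
    }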

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -1977,7 +1977,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if ("-report".equals(cmd)) {
-      if (argv.length > 6) {
+      if (argv.length < 1) {
         printUsage(cmd);
         return exitCode;
       }
@@ -2007,7 +2007,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if (RollingUpgradeCommand.matches(cmd)) {
-      if (argv.length > 2) {
+      if (argv.length < 1 || argv.length > 2) {
         printUsage(cmd);
         return exitCode;
       }
@@ -2082,7 +2082,7 @@ public class DFSAdmin extends FsShell {
         return exitCode;
       }
     } else if ("-triggerBlockReport".equals(cmd)) {
-      if ((argv.length != 2) && (argv.length != 3)) {
+      if (argv.length < 1) {
         printUsage(cmd);
         return exitCode;
       }

+ 98 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/ECAdmin.java

@@ -463,6 +463,101 @@ public class ECAdmin extends Configured implements Tool {
     }
   }
 
+  /** Command to enable an existing erasure coding policy. */
+  private static class EnableECPolicyCommand implements AdminHelper.Command {
+    @Override
+    public String getName() {
+      return "-enablePolicy";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + " -policy <policy>]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      TableListing listing = AdminHelper.getOptionDescriptionListing();
+      listing.addRow("<policy>", "The name of the erasure coding policy");
+      return getShortUsage() + "\n" +
+          "Enable the erasure coding policy.\n\n" +
+          listing.toString();
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      final String ecPolicyName = StringUtils.popOptionWithArgument("-policy",
+          args);
+      if (ecPolicyName == null) {
+        System.err.println("Please specify the policy name.\nUsage: " +
+            getLongUsage());
+        return 1;
+      }
+      if (args.size() > 0) {
+        System.err.println(getName() + ": Too many arguments");
+        return 1;
+      }
+
+      final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+      try {
+        dfs.enableErasureCodingPolicy(ecPolicyName);
+        System.out.println("Erasure coding policy " + ecPolicyName +
+            " is enabled");
+      } catch (IOException e) {
+        System.err.println(AdminHelper.prettifyException(e));
+        return 2;
+      }
+      return 0;
+    }
+  }
+
+  /** Command to disable an existing erasure coding policy. */
+  private static class DisableECPolicyCommand implements AdminHelper.Command {
+    @Override
+    public String getName() {
+      return "-disablePolicy";
+    }
+
+    @Override
+    public String getShortUsage() {
+      return "[" + getName() + " -policy <policy>]\n";
+    }
+
+    @Override
+    public String getLongUsage() {
+      TableListing listing = AdminHelper.getOptionDescriptionListing();
+      listing.addRow("<policy>", "The name of the erasure coding policy");
+      return getShortUsage() + "\n" +
+          "Disable the erasure coding policy.\n\n" +
+          listing.toString();
+    }
+
+    @Override
+    public int run(Configuration conf, List<String> args) throws IOException {
+      final String ecPolicyName = StringUtils.popOptionWithArgument("-policy",
+          args);
+      if (ecPolicyName == null) {
+        System.err.println("Please specify the policy name.\nUsage: " +
+            getLongUsage());
+        return 1;
+      }
+      if (args.size() > 0) {
+        System.err.println(getName() + ": Too many arguments");
+        return 1;
+      }
+
+      final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+      try {
+        dfs.disableErasureCodingPolicy(ecPolicyName);
+        System.out.println("Erasure coding policy " + ecPolicyName +
+            " is disabled");
+      } catch (IOException e) {
+        System.err.println(AdminHelper.prettifyException(e));
+        return 2;
+      }
+      return 0;
+    }
+  }
 
   private static final AdminHelper.Command[] COMMANDS = {
       new ListECPoliciesCommand(),
@@ -471,6 +566,8 @@ public class ECAdmin extends Configured implements Tool {
       new RemoveECPolicyCommand(),
       new SetECPolicyCommand(),
       new UnsetECPolicyCommand(),
-      new ListECCodecsCommand()
+      new ListECCodecsCommand(),
+      new EnableECPolicyCommand(),
+      new DisableECPolicyCommand()
   };
 }
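
The two new subcommands correspond to the client-side DistributedFileSystem#enableErasureCodingPolicy and #disableErasureCodingPolicy methods added elsewhere in this merge and exercised by TestDistributedFileSystem below. A hedged usage sketch; the policy name and the way the filesystem handle is obtained are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class EcPolicyToggleSketch {
      public static void main(String[] args) throws Exception {
        // Assumes fs.defaultFS in the loaded configuration points at an HDFS cluster.
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(conf);
        // Same effect as `hdfs ec -disablePolicy` / `hdfs ec -enablePolicy`.
        dfs.disableErasureCodingPolicy("RS-6-3-64k");   // policy name is illustrative
        dfs.enableErasureCodingPolicy("RS-6-3-64k");
        dfs.close();
      }
    }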

+ 3 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java

@@ -722,13 +722,9 @@ class ImageLoaderCurrent implements ImageLoader {
       if (supportSnapshot && supportInodeId) {
         dirNodeMap.put(inodeId, pathName);
       }
-
-      v.visit(ImageElement.NS_QUOTA, in.readLong());
-      if (NameNodeLayoutVersion.supports(Feature.DISKSPACE_QUOTA,
-          imageVersion)) {
-        v.visit(ImageElement.DS_QUOTA, in.readLong());
-      }
-
+      v.visit(ImageElement.NS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
+      if (NameNodeLayoutVersion.supports(Feature.DISKSPACE_QUOTA, imageVersion))
+        v.visit(ImageElement.DS_QUOTA, numBlocks == -1 ? in.readLong() : -1);
       if (supportSnapshot) {
         boolean snapshottable = in.readBoolean();
         if (!snapshottable) {

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md

@@ -446,6 +446,8 @@ Usage:
          [-listPolicies]
          [-addPolicies -policyFile <file>]
          [-listCodecs]
+         [-enablePolicy -policy <policyName>]
+         [-disablePolicy -policy <policyName>]
          [-help [cmd ...]]
 
 | COMMAND\_OPTION | Description |
@@ -456,6 +458,8 @@ Usage:
 |-listPolicies| Lists all supported ErasureCoding policies|
 |-addPolicies| Add a list of erasure coding policies|
 |-listCodecs| Get the list of supported erasure coding codecs and coders in system|
+|-enablePolicy| Enable an ErasureCoding policy in system|
+|-disablePolicy| Disable an ErasureCoding policy in system|
 
 Runs the ErasureCoding CLI. See [HDFS ErasureCoding](./HDFSErasureCoding.html#Administrative_commands) for more information on this command.
 

+ 14 - 6
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSErasureCoding.md

@@ -123,7 +123,7 @@ Deployment
   `io.erasurecode.codec.xor.rawcoders` for the XOR codec.
   User can also configure self-defined codec with configuration key like:
   `io.erasurecode.codec.self-defined-codec.rawcoders`.
-  The values for these key are lists of coder names with a fall-back mechanism.
+  The values for these keys are lists of coder names with a fall-back mechanism. These codec factories are loaded in the order specified by the configuration values until a codec is loaded successfully. The default RS and XOR codec configurations prefer the native implementation over the pure Java one. There is no native RS-LEGACY codec implementation, so its default is the pure Java implementation only.
   All these codecs have implementations in pure Java. For default RS codec, there is also a native implementation which leverages Intel ISA-L library to improve the performance of codec. For XOR codec, a native implementation which leverages Intel ISA-L library to improve the performance of codec is also supported. Please refer to section "Enable Intel ISA-L" for more detail information.
   The default implementation for RS Legacy is pure Java, and the default implementations for default RS and XOR are native implementations using Intel ISA-L library.
 
@@ -138,13 +138,11 @@ Deployment
 
   HDFS native implementation of default RS codec leverages Intel ISA-L library to improve the encoding and decoding calculation. To enable and use Intel ISA-L, there are three steps.
   1. Build ISA-L library. Please refer to the official site "https://github.com/01org/isa-l/" for detail information.
-  2. Build Hadoop with ISA-L support. Please refer to "Intel ISA-L build options" section in "Build instructions for Hadoop" in (BUILDING.txt) in the source code. Use `-Dbundle.isal` to copy the contents of the `isal.lib` directory into the final tar file. Deploy Hadoop with the tar file. Make sure ISA-L is available on HDFS clients and DataNodes.
-  3. Configure the `io.erasurecode.codec.rs.rawcoder` key with value `org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory` on HDFS clients and DataNodes.
+  2. Build Hadoop with ISA-L support. Please refer to "Intel ISA-L build options" section in "Build instructions for Hadoop" in (BUILDING.txt) in the source code.
+  3. Use `-Dbundle.isal` to copy the contents of the `isal.lib` directory into the final tar file. Deploy Hadoop with the tar file. Make sure ISA-L is available on HDFS clients and DataNodes.
 
   To verify that ISA-L is correctly detected by Hadoop, run the `hadoop checknative` command.
 
-  To enable the native implementation of the XOR codec, perform the same first two steps as above to build and deploy Hadoop with ISA-L support. Afterwards, configure the `io.erasurecode.codec.xor.rawcoder` key with `org.apache.hadoop.io.erasurecode.rawcoder.NativeXORRawErasureCoderFactory` on both HDFS client and DataNodes.
-
 ### Administrative commands
 
   HDFS provides an `ec` subcommand to perform administrative commands related to erasure coding.
@@ -156,6 +154,8 @@ Deployment
          [-listPolicies]
          [-addPolicies -policyFile <file>]
          [-listCodecs]
+         [-enablePolicy -policy <policyName>]
+         [-disablePolicy -policy <policyName>]
          [-help [cmd ...]]
 
 Below are the details about each command.
@@ -190,4 +190,12 @@ Below are the details about each command.
 
 *  `[-removePolicy -policy <policyName>]`
 
-     Remove an erasure coding policy.
+     Remove an erasure coding policy.
+
+*  `[-enablePolicy -policy <policyName>]`
+
+     Enable an erasure coding policy.
+
+*  `[-disablePolicy -policy <policyName>]`
+
+     Disable an erasure coding policy.
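
To make the coder fall-back mechanism described in this file's changes concrete: each rawcoders key takes an ordered, comma-separated list of coder names, and the first factory that loads successfully wins. A hedged sketch using Hadoop's Configuration API; the key names follow the pattern shown above, and the coder names are placeholders rather than the names shipped with a particular build:

    import org.apache.hadoop.conf.Configuration;

    final class RawCoderConfigSketch {
      static Configuration preferNativeCoders() {
        Configuration conf = new Configuration();
        // Ordered, comma-separated fall-back lists; the first coder that loads wins.
        // "<native-coder>" and "<java-coder>" are placeholders for real coder names.
        conf.set("io.erasurecode.codec.rs.rawcoders", "<native-coder>,<java-coder>");
        conf.set("io.erasurecode.codec.xor.rawcoders", "<native-coder>,<java-coder>");
        return conf;
      }
    }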

+ 48 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -1567,4 +1567,52 @@ public class TestDistributedFileSystem {
       }
     }
   }
+
+  @Test
+  public void testEnableAndDisableErasureCodingPolicy() throws Exception {
+    Configuration conf = getTestConfiguration();
+    MiniDFSCluster cluster = null;
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      ECSchema toAddSchema = new ECSchema("rs", 3, 2);
+      ErasureCodingPolicy toAddPolicy =
+          new ErasureCodingPolicy(toAddSchema, 128 * 1024, (byte) 254);
+      String policyName = toAddPolicy.getName();
+      ErasureCodingPolicy[] policies =
+          new ErasureCodingPolicy[]{toAddPolicy};
+      fs.addErasureCodingPolicies(policies);
+      assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
+          getByName(policyName).getName());
+      fs.disableErasureCodingPolicy(policyName);
+      assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
+          getRemovedPolicies().get(0).getName());
+      fs.enableErasureCodingPolicy(policyName);
+      assertEquals(policyName, ErasureCodingPolicyManager.getInstance().
+          getByName(policyName).getName());
+
+      // test enabling a policy that doesn't exist
+      try {
+        fs.enableErasureCodingPolicy("notExistECName");
+        Assert.fail("enabling a policy that doesn't exist should fail");
+      } catch (Exception e) {
+        GenericTestUtils.assertExceptionContains("does not exist", e);
+        // pass
+      }
+
+      // test disabling a policy that doesn't exist
+      try {
+        fs.disableErasureCodingPolicy("notExistECName");
+        Assert.fail("disabling a policy that doesn't exist should fail");
+      } catch (Exception e) {
+        GenericTestUtils.assertExceptionContains("does not exist", e);
+        // pass
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

+ 50 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSnapshotCommands.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff;
+import org.apache.hadoop.util.ChunkedArrayList;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
@@ -195,4 +196,53 @@ public class TestSnapshotCommands {
         "Disallowing snaphot on " + path + " succeeded", config);
     fs.delete(new Path("/Fully/QPath"), true);
   }
+
+  @Test (timeout=60000)
+  public void testSnapshotDiff() throws Exception {
+    Configuration config = new HdfsConfiguration();
+    Path snapDirPath = new Path(fs.getUri().toString() + "/snap_dir");
+    String snapDir = snapDirPath.toString();
+    fs.mkdirs(snapDirPath);
+
+    DFSTestUtil.DFSAdminRun("-allowSnapshot " + snapDirPath, 0,
+        "Allowing snaphot on " + snapDirPath + " succeeded", config);
+    DFSTestUtil.createFile(fs, new Path(snapDirPath, "file1"),
+        1024, (short) 1, 100);
+    DFSTestUtil.FsShellRun("-createSnapshot " + snapDirPath + " sn1", config);
+    DFSTestUtil.createFile(fs, new Path(snapDirPath, "file2"),
+        1024, (short) 1, 100);
+    DFSTestUtil.createFile(fs, new Path(snapDirPath, "file3"),
+        1024, (short) 1, 100);
+    DFSTestUtil.FsShellRun("-createSnapshot " + snapDirPath + " sn2", config);
+
+    // verify the snapshot diff using api and command line
+    SnapshotDiffReport report_s1_s2 =
+        fs.getSnapshotDiffReport(snapDirPath, "sn1", "sn2");
+    DFSTestUtil.toolRun(new SnapshotDiff(config), snapDir +
+        " sn1 sn2", 0, report_s1_s2.toString());
+    DFSTestUtil.FsShellRun("-renameSnapshot " + snapDirPath + " sn2 sn3",
+        config);
+
+    SnapshotDiffReport report_s1_s3 =
+        fs.getSnapshotDiffReport(snapDirPath, "sn1", "sn3");
+    DFSTestUtil.toolRun(new SnapshotDiff(config), snapDir +
+        " sn1 sn3", 0, report_s1_s3.toString());
+
+    // Create 100 more files so that the ChunkedArrayList backing the
+    // DiffReport generation is forced to create multiple chunks.
+    for (int i = 0; i < 100; i++) {
+      DFSTestUtil.createFile(fs, new Path(snapDirPath, "file_" + i),
+          1, (short) 1, 100);
+    }
+    DFSTestUtil.FsShellRun("-createSnapshot " + snapDirPath + " sn4", config);
+    DFSTestUtil.toolRun(new SnapshotDiff(config), snapDir +
+        " sn1 sn4", 0, null);
+
+    DFSTestUtil.FsShellRun("-deleteSnapshot " + snapDir + " sn1", config);
+    DFSTestUtil.FsShellRun("-deleteSnapshot " + snapDir + " sn3", config);
+    DFSTestUtil.FsShellRun("-deleteSnapshot " + snapDir + " sn4", config);
+    DFSTestUtil.DFSAdminRun("-disallowSnapshot " + snapDir, 0,
+        "Disallowing snaphot on " + snapDirPath + " succeeded", config);
+    fs.delete(new Path("/Fully/QPath"), true);
+  }
 }

+ 35 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java

@@ -774,4 +774,39 @@ public class TestBlockToken {
     testBlockTokenSerialization(false);
     testBlockTokenSerialization(true);
   }
+
+  private void testBadStorageIDCheckAccess(boolean enableProtobuf)
+      throws IOException {
+    BlockTokenSecretManager sm = new BlockTokenSecretManager(
+        blockKeyUpdateInterval, blockTokenLifetime, 0, 1, "fake-pool", null,
+        enableProtobuf);
+    StorageType[] storageTypes = new StorageType[] {StorageType.DISK};
+    String[] storageIds = new String[] {"fake-storage-id"};
+    String[] badStorageIds = new String[] {"BAD-STORAGE-ID"};
+    String[] emptyStorageIds = new String[] {};
+    BlockTokenIdentifier.AccessMode mode = BlockTokenIdentifier.AccessMode.READ;
+    BlockTokenIdentifier id = generateTokenId(sm, block3,
+        EnumSet.of(mode), storageTypes, storageIds);
+    sm.checkAccess(id, null, block3, mode, storageTypes, storageIds);
+
+    try {
+      sm.checkAccess(id, null, block3, mode, storageTypes, badStorageIds);
+      fail("Expected strict BlockTokenSecretManager to fail");
+    } catch(SecretManager.InvalidToken e) {
+    }
+    // We allow empty storageId tokens for backwards compatibility. i.e. old
+    // clients may not have known to pass the storageId parameter to the
+    // writeBlock api.
+    sm.checkAccess(id, null, block3, mode, storageTypes,
+        emptyStorageIds);
+    sm.checkAccess(id, null, block3, mode, storageTypes,
+        null);
+  }
+
+  @Test
+  public void testBadStorageIDCheckAccess() throws IOException {
+    testBadStorageIDCheckAccess(false);
+    testBadStorageIDCheckAccess(true);
+  }
+
 }

+ 51 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java

@@ -44,13 +44,21 @@ import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
+import org.apache.hadoop.hdfs.server.protocol.SlowPeerReports;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.util.Shell;
 import org.junit.Assert;
@@ -491,4 +499,47 @@ public class TestDatanodeManager {
     Assert.assertEquals("Unexpected host or host in unexpected position",
         "127.0.0.1:23456", bothAgain.get(1).getInfoAddr());
   }
+
+  @Test
+  public void testPendingRecoveryTasks() throws IOException {
+    FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
+    Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Configuration conf = new Configuration();
+    DatanodeManager dm = Mockito.spy(mockDatanodeManager(fsn, conf));
+
+    int maxTransfers = 20;
+    int numPendingTasks = 7;
+    int numECTasks = maxTransfers - numPendingTasks;
+
+    DatanodeDescriptor nodeInfo = Mockito.mock(DatanodeDescriptor.class);
+    Mockito.when(nodeInfo.isRegistered()).thenReturn(true);
+    Mockito.when(nodeInfo.getStorageInfos())
+        .thenReturn(new DatanodeStorageInfo[0]);
+
+    List<BlockTargetPair> pendingList =
+        Collections.nCopies(numPendingTasks, new BlockTargetPair(null, null));
+    Mockito.when(nodeInfo.getReplicationCommand(maxTransfers))
+        .thenReturn(pendingList);
+    List<BlockECReconstructionInfo> ecPendingList =
+        Collections.nCopies(numECTasks, null);
+
+    Mockito.when(nodeInfo.getErasureCodeCommand(numECTasks))
+        .thenReturn(ecPendingList);
+    DatanodeRegistration dnReg = Mockito.mock(DatanodeRegistration.class);
+    Mockito.when(dm.getDatanode(dnReg)).thenReturn(nodeInfo);
+
+    DatanodeCommand[] cmds = dm.handleHeartbeat(
+        dnReg, new StorageReport[1], "bp-123", 0, 0, 10, maxTransfers, 0, null,
+        SlowPeerReports.EMPTY_REPORT, SlowDiskReports.EMPTY_REPORT);
+
+    assertEquals(2, cmds.length);
+    assertTrue(cmds[0] instanceof BlockCommand);
+    BlockCommand replicaCmd = (BlockCommand) cmds[0];
+    assertEquals(numPendingTasks, replicaCmd.getBlocks().length);
+    assertEquals(numPendingTasks, replicaCmd.getTargets().length);
+    assertTrue(cmds[1] instanceof BlockECReconstructionCommand);
+    BlockECReconstructionCommand ecRecoveryCmd =
+        (BlockECReconstructionCommand) cmds[1];
+    assertEquals(numECTasks, ecRecoveryCmd.getECTasks().size());
+  }
 }

+ 84 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java

@@ -17,6 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
@@ -44,6 +48,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
+import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -178,7 +183,7 @@ public class TestPendingReconstruction {
   public void testProcessPendingReconstructions() throws Exception {
     final Configuration conf = new HdfsConfiguration();
     conf.setLong(
-        DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
+        DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, TIMEOUT);
     MiniDFSCluster cluster = null;
     Block block;
     BlockInfo blockInfo;
@@ -418,7 +423,7 @@ public class TestPendingReconstruction {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
     CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
         DFS_REPLICATION_INTERVAL);
-    CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
+    CONF.setInt(DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY,
         DFS_REPLICATION_INTERVAL);
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(
         DATANODE_COUNT).build();
@@ -471,4 +476,81 @@ public class TestPendingReconstruction {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testReplicationCounter() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    conf.setInt(DFS_NAMENODE_REDUNDANCY_INTERVAL_SECONDS_KEY, 1);
+    conf.setInt(DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 2);
+    MiniDFSCluster tmpCluster = new MiniDFSCluster.Builder(conf).numDataNodes(
+        DATANODE_COUNT).build();
+    tmpCluster.waitActive();
+    FSNamesystem fsn = tmpCluster.getNamesystem(0);
+    fsn.writeLock();
+
+    try {
+      BlockManager bm = fsn.getBlockManager();
+      BlocksMap blocksMap = bm.blocksMap;
+
+      // Create three blockInfos below: blockInfo0 will succeed, blockInfo1 will
+      // time out, and blockInfo2 will fail replication.
+      BlockCollection bc0 = Mockito.mock(BlockCollection.class);
+      BlockInfo blockInfo0 = new BlockInfoContiguous((short) 3);
+      blockInfo0.setBlockId(0);
+
+      BlockCollection bc1 = Mockito.mock(BlockCollection.class);
+      BlockInfo blockInfo1 = new BlockInfoContiguous((short) 3);
+      blockInfo1.setBlockId(1);
+
+      BlockCollection bc2 = Mockito.mock(BlockCollection.class);
+      Mockito.when(bc2.getId()).thenReturn((2L));
+      BlockInfo blockInfo2 = new BlockInfoContiguous((short) 3);
+      blockInfo2.setBlockId(2);
+
+      blocksMap.addBlockCollection(blockInfo0, bc0);
+      blocksMap.addBlockCollection(blockInfo1, bc1);
+      blocksMap.addBlockCollection(blockInfo2, bc2);
+
+      PendingReconstructionBlocks pending = bm.pendingReconstruction;
+
+      MetricsRecordBuilder rb = getMetrics("NameNodeActivity");
+      assertCounter("SuccessfulReReplications", 0L, rb);
+      assertCounter("NumTimesReReplicationNotScheduled", 0L, rb);
+      assertCounter("TimeoutReReplications", 0L, rb);
+
+      // add block0 and block1 to pending queue.
+      pending.increment(blockInfo0);
+      pending.increment(blockInfo1);
+
+      Thread.sleep(2000);
+
+      rb = getMetrics("NameNodeActivity");
+      assertCounter("SuccessfulReReplications", 0L, rb);
+      assertCounter("NumTimesReReplicationNotScheduled", 0L, rb);
+      assertCounter("TimeoutReReplications", 0L, rb);
+
+      // calling addBlock on block0 marks it as successfully replicated;
+      // not calling addBlock on block1 makes it time out later.
+      DatanodeStorageInfo[] storageInfos =
+          DFSTestUtil.createDatanodeStorageInfos(1);
+      bm.addBlock(storageInfos[0], blockInfo0, null);
+
+      // calling scheduleReconstruction on blockInfo2 fails the re-replication
+      // because there is no source data to replicate from.
+      bm.scheduleReconstruction(blockInfo2, 0);
+
+      Thread.sleep(2000);
+
+      rb = getMetrics("NameNodeActivity");
+      assertCounter("SuccessfulReReplications", 1L, rb);
+      assertCounter("NumTimesReReplicationNotScheduled", 1L, rb);
+      assertCounter("TimeoutReReplications", 1L, rb);
+
+    } finally {
+      tmpCluster.shutdown();
+      fsn.writeUnlock();
+    }
+  }
+
+
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java

@@ -687,7 +687,7 @@ public class TestFsDatasetImpl {
           @Override public Boolean get() {
               return volume.getReferenceCount() == 0;
             }
-          }, 100, 10);
+          }, 100, 1000);
       assertThat(dataNode.getFSDataset().getNumFailedVolumes(), is(1));
 
       try {

+ 196 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testErasureCodingConf.xml

@@ -163,6 +163,44 @@
       </comparators>
     </test>
 
+    <test>
+      <description>help: enablePolicy command</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -help enablePolicy</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Enable the erasure coding policy</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>[-enablePolicy -policy &lt;policy&gt;]</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>help: disablePolicy command</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -help disablePolicy</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Disable the erasure coding policy</expected-output>
+        </comparator>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>[-disablePolicy -policy &lt;policy&gt;]</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
   <!-- Test erasure code commands -->
     <test>
       <description>setPolicy : set erasure coding policy on a directory to encode files</description>
@@ -349,6 +387,70 @@
       </comparators>
     </test>
 
+    <test>
+      <description>enablePolicy : enable the erasure coding policy</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Erasure coding policy RS-6-3-64k is enabled</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>enablePolicy : enable the erasure coding policy twice</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Erasure coding policy RS-6-3-64k is enabled</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>disablePolicy : disable the erasure coding policy</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Erasure coding policy RS-6-3-64k is disabled</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>disablePolicy : disable the erasure coding policy twice</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k</ec-admin-command>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k</ec-admin-command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Erasure coding policy RS-6-3-64k is disabled</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
 <!-- Test illegal parameters -->
     <test>
       <description>setPolicy : illegal parameters - path is missing</description>
@@ -541,6 +643,66 @@
       </comparators>
     </test>
 
+    <test>
+      <description>enablePolicy : illegal parameters - policy is missing</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -enablePolicy RS-6-3-64k</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Please specify the policy name</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>enablePolicy : illegal parameters - too many parameters</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -enablePolicy -policy RS-6-3-64k RS-3-2-64k</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>-enablePolicy: Too many arguments</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>disablePolicy : illegal parameters - policy is missing</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -disablePolicy RS-6-3-64k</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>Please specify the policy name</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test>
+      <description>disablePolicy : illegal parameters - too many parameters</description>
+      <test-commands>
+        <ec-admin-command>-fs NAMENODE -disablePolicy -policy RS-6-3-64k RS-3-2-64k</ec-admin-command>
+      </test-commands>
+      <cleanup-commands>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>SubstringComparator</type>
+          <expected-output>-disablePolicy: Too many arguments</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
     <test>
       <description>listCodecs : illegal parameters - too many parameters</description>
       <test-commands>
@@ -611,5 +773,39 @@
         </comparator>
       </comparators>
     </test>
+
+    <test> <!-- TESTED -->
+      <description>ls: use an absolute path and option -e to show the erasure coding policy of files in a directory</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p /ecdir</command>
+        <ec-admin-command>-fs NAMENODE -setPolicy -path /ecdir -policy RS-6-3-64k</ec-admin-command>
+        <command>-fs NAMENODE -touchz /ecdir/file1</command>
+        <command>-fs NAMENODE -touchz /ecdir/file2</command>
+        <command>-fs NAMENODE -touchz /ecdir/file3</command>
+        <command>-fs NAMENODE -ls -e /ecdir</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rmdir /ecdir</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>Found [0-9] items</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*[A-Za-z0-9-]{1,}( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/ecdir/file1</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*[A-Za-z0-9-]{1,}( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/ecdir/file2</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^-rw-r--r--( )*1( )*USERNAME( )*supergroup( )*[A-Za-z0-9-]{1,}( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/ecdir/file3</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
   </tests>
 </configuration>

+ 2 - 2
hadoop-hdfs-project/pom.xml

@@ -20,11 +20,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-hdfs-project</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop HDFS Project</description>
   <name>Apache Hadoop HDFS Project</name>
   <packaging>pom</packaging>

+ 1 - 1
hadoop-mapreduce-project/bin/mapred-config.sh

@@ -47,7 +47,7 @@ function hadoop_subproject_init
 
   hadoop_deprecate_envvar HADOOP_MAPRED_ROOT_LOGGER HADOOP_ROOT_LOGGER
 
-  hadoop_deprecate_envvar HADOOP_JOB_HISTORY_OPTS MAPRED_HISTORYSERVER_OPTS
+  hadoop_deprecate_envvar HADOOP_JOB_HISTORYSERVER_OPTS MAPRED_HISTORYSERVER_OPTS
 
   HADOOP_MAPRED_HOME="${HADOOP_MAPRED_HOME:-$HADOOP_HOME}"
 

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/pom.xml

@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-app</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce App</name>
 
   <properties>

+ 4 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java

@@ -919,7 +919,8 @@ public class RMContainerAllocator extends RMContainerRequestor
 
   private void applyConcurrentTaskLimits() {
     int numScheduledMaps = scheduledRequests.maps.size();
-    if (maxRunningMaps > 0 && numScheduledMaps > 0) {
+    if (maxRunningMaps > 0 && numScheduledMaps > 0 &&
+        getJob().getTotalMaps() > maxRunningMaps) {
       int maxRequestedMaps = Math.max(0,
           maxRunningMaps - assignedRequests.maps.size());
       int numScheduledFailMaps = scheduledRequests.earlierFailedMaps.size();
@@ -936,7 +937,8 @@ public class RMContainerAllocator extends RMContainerRequestor
     }
 
     int numScheduledReduces = scheduledRequests.reduces.size();
-    if (maxRunningReduces > 0 && numScheduledReduces > 0) {
+    if (maxRunningReduces > 0 && numScheduledReduces > 0 &&
+        getJob().getTotalReduces() > maxRunningReduces) {
       int maxRequestedReduces = Math.max(0,
           maxRunningReduces - assignedRequests.reduces.size());
       int reduceRequestLimit = Math.min(maxRequestedReduces,
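
The extra condition above means the per-job running-task limits only take effect when the job actually has more maps or reduces than the configured limit; a job that fits under the limit no longer gets an artificial request cap. A hedged sketch of setting those limits with the MRJobConfig keys the new tests use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    final class ConcurrentTaskLimitSketch {
      static Configuration limitRunningTasks() {
        Configuration conf = new Configuration();
        // Only enforced when the job's total maps/reduces exceed these limits.
        conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, 3);
        conf.setInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT, 1);
        return conf;
      }
    }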

+ 69 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java

@@ -2781,15 +2781,78 @@ public class TestRMContainerAllocator {
         new Text(rmAddr), ugiToken.getService());
   }
 
+  @Test
+  public void testConcurrentTaskLimitsDisabledIfSmaller() throws Exception {
+    final int MAP_COUNT = 1;
+    final int REDUCE_COUNT = 1;
+    final int MAP_LIMIT = 1;
+    final int REDUCE_LIMIT = 1;
+    Configuration conf = new Configuration();
+    conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, MAP_LIMIT);
+    conf.setInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT, REDUCE_LIMIT);
+    conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.0f);
+    ApplicationId appId = ApplicationId.newInstance(1, 1);
+    ApplicationAttemptId appAttemptId =
+        ApplicationAttemptId.newInstance(appId, 1);
+    JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+    Job mockJob = mock(Job.class);
+    when(mockJob.getReport()).thenReturn(
+        MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
+            0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
+    when(mockJob.getTotalMaps()).thenReturn(MAP_COUNT);
+    when(mockJob.getTotalReduces()).thenReturn(REDUCE_COUNT);
+
+    final MockScheduler mockScheduler = new MockScheduler(appAttemptId);
+    MyContainerAllocator allocator =
+        new MyContainerAllocator(null, conf, appAttemptId, mockJob,
+            SystemClock.getInstance()) {
+          @Override
+          protected void register() {
+          }
+
+          @Override
+          protected ApplicationMasterProtocol createSchedulerProxy() {
+            return mockScheduler;
+          }
+
+          @Override
+          protected void setRequestLimit(Priority priority,
+              Resource capability, int limit) {
+            Assert.fail("setRequestLimit() should not be invoked");
+          }
+        };
+
+    // create some map requests
+    ContainerRequestEvent[] reqMapEvents = new ContainerRequestEvent[MAP_COUNT];
+    for (int i = 0; i < reqMapEvents.length; ++i) {
+      reqMapEvents[i] = createReq(jobId, i, 1024, new String[] { "h" + i });
+    }
+    allocator.sendRequests(Arrays.asList(reqMapEvents));
+    // create some reduce requests
+    ContainerRequestEvent[] reqReduceEvents =
+        new ContainerRequestEvent[REDUCE_COUNT];
+    for (int i = 0; i < reqReduceEvents.length; ++i) {
+      reqReduceEvents[i] =
+          createReq(jobId, i, 1024, new String[] {}, false, true);
+    }
+    allocator.sendRequests(Arrays.asList(reqReduceEvents));
+    allocator.schedule();
+    allocator.schedule();
+    allocator.schedule();
+    allocator.close();
+  }
+
   @Test
   public void testConcurrentTaskLimits() throws Exception {
+    final int MAP_COUNT = 5;
+    final int REDUCE_COUNT = 2;
     final int MAP_LIMIT = 3;
     final int REDUCE_LIMIT = 1;
     LOG.info("Running testConcurrentTaskLimits");
     Configuration conf = new Configuration();
     conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, MAP_LIMIT);
     conf.setInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT, REDUCE_LIMIT);
-    conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 1.0f);
+    conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.0f);
     ApplicationId appId = ApplicationId.newInstance(1, 1);
     ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(
         appId, 1);
@@ -2798,6 +2861,9 @@ public class TestRMContainerAllocator {
     when(mockJob.getReport()).thenReturn(
         MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
             0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
+    when(mockJob.getTotalMaps()).thenReturn(MAP_COUNT);
+    when(mockJob.getTotalReduces()).thenReturn(REDUCE_COUNT);
+
     final MockScheduler mockScheduler = new MockScheduler(appAttemptId);
     MyContainerAllocator allocator = new MyContainerAllocator(null, conf,
         appAttemptId, mockJob, SystemClock.getInstance()) {
@@ -2812,14 +2878,13 @@ public class TestRMContainerAllocator {
     };
 
     // create some map requests
-    ContainerRequestEvent[] reqMapEvents = new ContainerRequestEvent[5];
+    ContainerRequestEvent[] reqMapEvents = new ContainerRequestEvent[MAP_COUNT];
     for (int i = 0; i < reqMapEvents.length; ++i) {
       reqMapEvents[i] = createReq(jobId, i, 1024, new String[] { "h" + i });
     }
     allocator.sendRequests(Arrays.asList(reqMapEvents));
-
     // create some reduce requests
-    ContainerRequestEvent[] reqReduceEvents = new ContainerRequestEvent[2];
+    ContainerRequestEvent[] reqReduceEvents = new ContainerRequestEvent[REDUCE_COUNT];
     for (int i = 0; i < reqReduceEvents.length; ++i) {
       reqReduceEvents[i] = createReq(jobId, i, 1024, new String[] {},
           false, true);
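
As a usage note, the limits exercised by these tests are ordinary job configuration. A hedged client-side sketch (the job name and the omitted mapper/reducer wiring are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class RunningTaskLimitExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Cap how many map/reduce tasks of this job may run concurrently.
        conf.setInt(MRJobConfig.JOB_RUNNING_MAP_LIMIT, 3);
        conf.setInt(MRJobConfig.JOB_RUNNING_REDUCE_LIMIT, 1);
        Job job = Job.getInstance(conf, "running-task-limit-example");
        // ... set mapper, reducer, and input/output paths as usual ...
        System.exit(job.waitForCompletion(true) ? 0 : 1);
      }
    }

With the allocator change above, a job whose total map or reduce count does not exceed these caps skips the limiting path entirely.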

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml

@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-common</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Common</name>
 
   <properties>

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml

@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-core</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Core</name>
 
   <properties>

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/pom.xml

@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-hs-plugins</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce HistoryServer Plugins</name>
 
   <properties>

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml

@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-hs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce HistoryServer</name>
 
   <properties>

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml

@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce JobClient</name>
 
   <properties>

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/pom.xml

@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-nativetask</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce NativeTask</name>
 
   <properties>

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/pom.xml

@@ -19,11 +19,11 @@
   <parent>
     <artifactId>hadoop-mapreduce-client</artifactId>
     <groupId>org.apache.hadoop</groupId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <modelVersion>4.0.0</modelVersion>
   <artifactId>hadoop-mapreduce-client-shuffle</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Shuffle</name>
 
   <properties>

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce-client</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop MapReduce Client</name>
   <packaging>pom</packaging>
 

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce-examples</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop MapReduce Examples</description>
   <name>Apache Hadoop MapReduce Examples</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-mapreduce-project/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-mapreduce</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>pom</packaging>
   <name>Apache Hadoop MapReduce</name>
   <url>http://hadoop.apache.org/mapreduce/</url>

+ 1 - 1
hadoop-maven-plugins/pom.xml

@@ -19,7 +19,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-maven-plugins</artifactId>

+ 2 - 2
hadoop-minicluster/pom.xml

@@ -18,11 +18,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-minicluster</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Mini-Cluster</description>

+ 2 - 2
hadoop-project-dist/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-project-dist</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Project Dist POM</description>
   <name>Apache Hadoop Project Dist POM</name>
   <packaging>pom</packaging>

+ 3 - 3
hadoop-project/pom.xml

@@ -20,10 +20,10 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-main</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
   </parent>
   <artifactId>hadoop-project</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Project POM</description>
   <name>Apache Hadoop Project POM</name>
   <packaging>pom</packaging>
@@ -1125,7 +1125,7 @@
       <dependency>
         <groupId>com.microsoft.azure</groupId>
         <artifactId>azure-storage</artifactId>
-        <version>4.2.0</version>
+        <version>5.3.0</version>
      </dependency>
 
       <dependency>

+ 1 - 1
hadoop-tools/hadoop-aliyun/pom.xml

@@ -18,7 +18,7 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-aliyun</artifactId>

+ 2 - 2
hadoop-tools/hadoop-archive-logs/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-archive-logs</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Archive Logs</description>
   <name>Apache Hadoop Archive Logs</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-tools/hadoop-archives/pom.xml

@@ -20,11 +20,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-archives</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <description>Apache Hadoop Archives</description>
   <name>Apache Hadoop Archives</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-tools/hadoop-aws/pom.xml

@@ -19,11 +19,11 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>3.0.0-alpha4-SNAPSHOT</version>
+    <version>3.0.0-beta1-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <artifactId>hadoop-aws</artifactId>
-  <version>3.0.0-alpha4-SNAPSHOT</version>
+  <version>3.0.0-beta1-SNAPSHOT</version>
   <name>Apache Hadoop Amazon Web Services support</name>
   <description>
     This module contains code to support integration with Amazon Web Services.

Some files were not shown because too many files changed in this diff