
Merge from trunk to branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/fs-encryption@1615844 13f79535-47bb-0310-9956-ffa450edef68
Andrew Wang, 10 years ago
commit ac73d416f3
100 changed files with 3,898 additions and 1,287 deletions
  1. 32 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 9 5
      hadoop-common-project/hadoop-common/src/main/bin/hadoop
  3. 0 2
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh
  4. 10 3
      hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd
  5. 66 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  6. 10 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
  7. 42 42
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java
  8. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
  9. 13 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
  10. 50 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  11. 67 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  12. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
  13. 8 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
  14. 5 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
  15. 8 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
  16. 7 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
  17. 9 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  18. 9 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  19. 12 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java
  20. 2 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
  21. 36 29
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java
  22. 125 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Classpath.java
  23. 14 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java
  24. 14 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java
  25. 37 0
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
  26. 17 2
      hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm
  27. 9 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java
  28. 24 24
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
  29. 3 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
  30. 34 15
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java
  31. 176 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java
  32. 51 33
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java
  33. 174 15
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java
  34. 4 2
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java
  35. 6 1
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java
  36. 9 1
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java
  37. 11 0
      hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java
  38. 19 0
      hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm
  39. 134 0
      hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java
  40. 25 0
      hadoop-common-project/hadoop-kms/src/test/resources/log4j-kmsaudit.properties
  41. 215 166
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
  42. 587 3
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java
  43. 52 3
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  44. 5 0
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  45. 6 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
  46. 12 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  47. 4 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  48. 20 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  49. 10 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java
  50. 11 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
  51. 19 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  52. 16 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  53. 13 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  54. 8 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  55. 368 249
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  56. 61 23
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java
  57. 103 71
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  58. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
  59. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
  60. 13 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java
  61. 188 115
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
  62. 11 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
  63. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
  64. 21 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  65. 4 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java
  66. 122 22
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
  67. 36 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
  68. 22 40
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java
  69. 68 28
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java
  70. 1 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
  71. 5 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
  72. 0 228
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LDir.java
  73. 0 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  74. 29 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  75. 8 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  76. 17 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
  77. 19 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java
  78. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java
  79. 8 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
  80. 128 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java
  81. 7 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  82. 58 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java
  83. 3 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
  84. 10 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
  85. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
  86. 8 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  87. 8 5
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
  88. 44 0
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm
  89. 18 11
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
  90. 32 12
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java
  91. 28 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  92. 11 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java
  93. 5 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java
  94. 77 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java
  95. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
  96. 9 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
  97. 7 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
  98. 11 10
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
  99. 5 10
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
  100. 48 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java

+ 32 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -192,6 +192,11 @@ Trunk (Unreleased)
     HADOOP-10891. Add EncryptedKeyVersion factory method to
     KeyProviderCryptoExtension. (wang)
 
+    HADOOP-10756. KMS audit log should consolidate successful similar requests. 
+    (asuresh via tucu)
+
+    HADOOP-10793. KeyShell args should use single-dash style. (wang)
+
   BUG FIXES
 
     HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -405,6 +410,12 @@ Trunk (Unreleased)
     HADOOP-10881. Clarify usage of encryption and encrypted encryption
     key in KeyProviderCryptoExtension. (wang)
 
+    HADOOP-10920. site plugin couldn't parse hadoop-kms index.apt.vm.
+    (Akira Ajisaka via wang)
+
+    HADOOP-10925. Compilation fails in native link0 function on Windows.
+    (cnauroth)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -463,6 +474,14 @@ Release 2.6.0 - UNRELEASED
     HADOOP-8069. Enable TCP_NODELAY by default for IPC. (Todd Lipcon via
     Arpit Agarwal)
 
+    HADOOP-10902. Deletion of directories with snapshots will not output
+    reason for trash move failure. (Stephen Chu via wang)
+
+    HADOOP-10900. CredentialShell args should use single-dash style. (wang)
+
+    HADOOP-10903. Enhance hadoop classpath command to expand wildcards or write
+    classpath into jar manifest. (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -497,6 +516,15 @@ Release 2.6.0 - UNRELEASED
     HADOOP-10876. The constructor of Path should not take an empty URL as a
     parameter. (Zhihai Xu via wang)
 
+    HADOOP-10928. Incorrect usage on `hadoop credential list`.
+    (Josh Elser via wang)
+
+    HADOOP-10927. Fix CredentialShell help behavior and error codes.
+    (Josh Elser via wang)
+
+    HADOOP-10937. Need to set version name correctly before decrypting EEK.
+    (Arun Suresh via wang)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -637,6 +665,8 @@ Release 2.5.0 - UNRELEASED
 
 
   BUG FIXES
 
+    HADOOP-10759. Remove hardcoded JAVA_HEAP_MAX. (Sam Liu via Eric Yang)
+
     HADOOP-10378. Typo in help printed by hdfs dfs -help.
     (Mit Desai via suresh)
 
@@ -813,6 +843,8 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10894. Fix dead link in ToolRunner documentation. (Akira Ajisaka
     via Arpit Agarwal)
 
+    HADOOP-10910. Increase findbugs maxHeap size. (wang)
+
   BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS
 
     HADOOP-10520. Extended attributes definition and FileSystem APIs for

+ 9 - 5
hadoop-common-project/hadoop-common/src/main/bin/hadoop

@@ -35,6 +35,7 @@ function print_usage(){
   echo "  distcp <srcurl> <desturl> copy file or directories recursively"
   echo "  distcp <srcurl> <desturl> copy file or directories recursively"
   echo "  archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
   echo "  archive -archiveName NAME -p <parent path> <src>* <dest> create a hadoop archive"
   echo "  classpath            prints the class path needed to get the"
   echo "  classpath            prints the class path needed to get the"
+  echo "  credential           interact with credential providers"
   echo "                       Hadoop jar and the required libraries"
   echo "                       Hadoop jar and the required libraries"
   echo "  daemonlog            get/set the log level for each daemon"
   echo "  daemonlog            get/set the log level for each daemon"
   echo " or"
   echo " or"
@@ -90,11 +91,6 @@ case $COMMAND in
     fi
     ;;
 
-  classpath)
-    echo $CLASSPATH
-    exit
-    ;;
-
   #core commands  
   *)
     # the core commands
@@ -118,6 +114,14 @@ case $COMMAND in
       CLASSPATH=${CLASSPATH}:${TOOL_PATH}
     elif [ "$COMMAND" = "credential" ] ; then
       CLASS=org.apache.hadoop.security.alias.CredentialShell
+    elif [ "$COMMAND" = "classpath" ] ; then
+      if [ "$#" -eq 1 ]; then
+        # No need to bother starting up a JVM for this simple case.
+        echo $CLASSPATH
+        exit
+      else
+        CLASS=org.apache.hadoop.util.Classpath
+      fi
     elif [[ "$COMMAND" = -*  ]] ; then
     elif [[ "$COMMAND" = -*  ]] ; then
         # class and package names cannot begin with a -
         # class and package names cannot begin with a -
         echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"
         echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"

+ 0 - 2
hadoop-common-project/hadoop-common/src/main/bin/hadoop-config.sh

@@ -149,8 +149,6 @@ if [[ -z $JAVA_HOME ]]; then
 fi
 
 JAVA=$JAVA_HOME/bin/java
-# some Java parameters
-JAVA_HEAP_MAX=-Xmx1000m 
 
 
 # check envvars which might override default args
 if [ "$HADOOP_HEAPSIZE" != "" ]; then

+ 10 - 3
hadoop-common-project/hadoop-common/src/main/bin/hadoop.cmd

@@ -115,11 +115,14 @@ call :updatepath %HADOOP_BIN_PATH%
   )
 
   if %hadoop-command% == classpath (
-    @echo %CLASSPATH%
-    goto :eof
+    if not defined hadoop-command-arguments (
+      @rem No need to bother starting up a JVM for this simple case.
+      @echo %CLASSPATH%
+      exit /b
+    )
   )
 
-  set corecommands=fs version jar checknative distcp daemonlog archive
+  set corecommands=fs version jar checknative distcp daemonlog archive classpath
   for %%i in ( %corecommands% ) do (
     if %hadoop-command% == %%i set corecommand=true  
   )
@@ -175,6 +178,10 @@ call :updatepath %HADOOP_BIN_PATH%
   set CLASSPATH=%CLASSPATH%;%TOOL_PATH%
   goto :eof
 
+:classpath
+  set CLASS=org.apache.hadoop.util.Classpath
+  goto :eof
+
 :updatepath
   set path_to_add=%*
   set current_path_comparable=%path%
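
Per the HADOOP-10903 entry above, the classpath command can now expand wildcards or write the classpath into a jar manifest instead of only echoing it. The sketch below illustrates the general manifest technique that description refers to; it is not the org.apache.hadoop.util.Classpath implementation, and the jar name and entries are made up.

import java.io.FileOutputStream;
import java.util.jar.Attributes;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;

public class ManifestClasspathSketch {
  public static void main(String[] args) throws Exception {
    // Join the (hypothetical) expanded classpath entries into a Class-Path
    // attribute and write an otherwise empty jar that carries only the manifest.
    String classPath = String.join(" ",
        "file:/opt/hadoop/share/hadoop/common/hadoop-common.jar",
        "file:/opt/hadoop/share/hadoop/hdfs/hadoop-hdfs.jar");
    Manifest manifest = new Manifest();
    Attributes attrs = manifest.getMainAttributes();
    attrs.put(Attributes.Name.MANIFEST_VERSION, "1.0");
    attrs.put(Attributes.Name.CLASS_PATH, classPath);
    try (JarOutputStream jar =
        new JarOutputStream(new FileOutputStream("classpath.jar"), manifest)) {
      // No entries are needed; the manifest alone supplies the classpath.
    }
  }
}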

+ 66 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -1843,6 +1843,38 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
     return pass;
   }
 
+  /**
+   * Get the socket address for <code>hostProperty</code> as a
+   * <code>InetSocketAddress</code>. If <code>hostProperty</code> is
+   * <code>null</code>, <code>addressProperty</code> will be used. This
+   * is useful for cases where we want to differentiate between host
+   * bind address and address clients should use to establish connection.
+   *
+   * @param hostProperty bind host property name.
+   * @param addressProperty address property name.
+   * @param defaultAddressValue the default value
+   * @param defaultPort the default port
+   * @return InetSocketAddress
+   */
+  public InetSocketAddress getSocketAddr(
+      String hostProperty,
+      String addressProperty,
+      String defaultAddressValue,
+      int defaultPort) {
+
+    InetSocketAddress bindAddr = getSocketAddr(
+      addressProperty, defaultAddressValue, defaultPort);
+
+    final String host = get(hostProperty);
+
+    if (host == null || host.isEmpty()) {
+      return bindAddr;
+    }
+
+    return NetUtils.createSocketAddr(
+        host, bindAddr.getPort(), hostProperty);
+  }
+
   /**
    * Get the socket address for <code>name</code> property as a
    * <code>InetSocketAddress</code>.
@@ -1864,6 +1896,40 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   public void setSocketAddr(String name, InetSocketAddress addr) {
     set(name, NetUtils.getHostPortString(addr));
   }
+
+  /**
+   * Set the socket address a client can use to connect for the
+   * <code>name</code> property as a <code>host:port</code>.  The wildcard
+   * address is replaced with the local host's address. If the host and address
+   * properties are configured the host component of the address will be combined
+   * with the port component of the addr to generate the address.  This is to allow
+   * optional control over which host name is used in multi-home bind-host
+   * cases where a host can have multiple names
+   * @param hostProperty the bind-host configuration name
+   * @param addressProperty the service address configuration name
+   * @param defaultAddressValue the service default address configuration value
+   * @param addr InetSocketAddress of the service listener
+   * @return InetSocketAddress for clients to connect
+   */
+  public InetSocketAddress updateConnectAddr(
+      String hostProperty,
+      String addressProperty,
+      String defaultAddressValue,
+      InetSocketAddress addr) {
+
+    final String host = get(hostProperty);
+    final String connectHostPort = getTrimmed(addressProperty, defaultAddressValue);
+
+    if (host == null || host.isEmpty() || connectHostPort == null || connectHostPort.isEmpty()) {
+      //not our case, fall back to original logic
+      return updateConnectAddr(addressProperty, addr);
+    }
+
+    final String connectHost = connectHostPort.split(":")[0];
+    // Create connect address using client address hostname and server port.
+    return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost(
+        connectHost, addr.getPort()));
+  }
   
   
   /**
    * Set the socket address a client can use to connect for the
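
A minimal sketch of how the two new Configuration helpers above might be used together by a service that binds to a wildcard address but advertises a client-facing hostname. The myservice.* keys are hypothetical placeholders, not configuration names defined by this change.

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;

public class BindHostExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical keys: a wildcard bind host plus the address clients use.
    conf.set("myservice.bind-host", "0.0.0.0");
    conf.set("myservice.address", "host1.example.com:8020");

    // Server side: resolve the listen address, letting the bind-host key
    // override the host component of the address property.
    InetSocketAddress listenAddr = conf.getSocketAddr(
        "myservice.bind-host", "myservice.address", "0.0.0.0:8020", 8020);

    // Publish the address clients should connect to: the host comes from
    // myservice.address, the port from the actual listener address.
    InetSocketAddress clientAddr = conf.updateConnectAddr(
        "myservice.bind-host", "myservice.address", "0.0.0.0:8020", listenAddr);

    System.out.println("listen on " + listenAddr + ", clients connect to " + clientAddr);
  }
}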

+ 10 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java

@@ -21,11 +21,13 @@ package org.apache.hadoop.crypto.key;
 import java.io.IOException;
 import java.security.GeneralSecurityException;
 import java.security.SecureRandom;
+
 import javax.crypto.Cipher;
 import javax.crypto.spec.IvParameterSpec;
 import javax.crypto.spec.SecretKeySpec;
 
 import com.google.common.base.Preconditions;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 
 /**
@@ -97,7 +99,7 @@ public class KeyProviderCryptoExtension extends
     public static EncryptedKeyVersion createForDecryption(String
         encryptionKeyVersionName, byte[] encryptedKeyIv,
         byte[] encryptedKeyMaterial) {
-      KeyVersion encryptedKeyVersion = new KeyVersion(null, null,
+      KeyVersion encryptedKeyVersion = new KeyVersion(null, EEK,
           encryptedKeyMaterial);
       return new EncryptedKeyVersion(null, encryptionKeyVersionName,
           encryptedKeyIv, encryptedKeyVersion);
@@ -258,6 +260,13 @@ public class KeyProviderCryptoExtension extends
           keyProvider.getKeyVersion(encryptionKeyVersionName);
       Preconditions.checkNotNull(encryptionKey,
           "KeyVersion name '%s' does not exist", encryptionKeyVersionName);
+      Preconditions.checkArgument(
+              encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+                    .equals(KeyProviderCryptoExtension.EEK),
+                "encryptedKey version name must be '%s', is '%s'",
+                KeyProviderCryptoExtension.EEK,
+                encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
+            );
       final byte[] encryptionKeyMaterial = encryptionKey.getMaterial();
       // Encryption key IV is determined from encrypted key's IV
       final byte[] encryptionIV =
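
A hedged sketch of the round trip this new precondition guards (HADOOP-10937): rebuilding an EncryptedKeyVersion from its stored parts via createForDecryption now labels it with the EEK version name, so decryptEncryptedKey accepts it. The user:/// provider and the key name below are illustrative choices, not part of this patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class EekRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(KeyProviderFactory.KEY_PROVIDER_PATH, "user:///"); // illustrative provider
    KeyProvider provider = KeyProviderFactory.getProviders(conf).get(0);
    KeyProviderCryptoExtension kp =
        KeyProviderCryptoExtension.createKeyProviderCryptoExtension(provider);

    // Create an encryption key and generate an encrypted key (EEK) under it.
    provider.createKey("ezKey", new KeyProvider.Options(conf));
    EncryptedKeyVersion eek = kp.generateEncryptedKey("ezKey");

    // Rebuild the EEK from its stored parts; with this change the rebuilt
    // object carries the EEK version name that decryptEncryptedKey() checks.
    EncryptedKeyVersion rebuilt = EncryptedKeyVersion.createForDecryption(
        eek.getEncryptionKeyVersionName(),
        eek.getEncryptedKeyIv(),
        eek.getEncryptedKeyVersion().getMaterial());
    KeyProvider.KeyVersion decrypted = kp.decryptEncryptedKey(rebuilt);
    System.out.println("decrypted key bits: " + decrypted.getMaterial().length * 8);
  }
}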

+ 42 - 42
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyShell.java

@@ -38,9 +38,9 @@ import org.apache.hadoop.util.ToolRunner;
  */
 public class KeyShell extends Configured implements Tool {
   final static private String USAGE_PREFIX = "Usage: hadoop key " +
-  		"[generic options]\n";
+      "[generic options]\n";
   final static private String COMMANDS =
-      "   [--help]\n" +
+      "   [-help]\n" +
       "   [" + CreateCommand.USAGE + "]\n" +
       "   [" + RollCommand.USAGE + "]\n" +
       "   [" + DeleteCommand.USAGE + "]\n" +
@@ -90,11 +90,11 @@ public class KeyShell extends Configured implements Tool {
   /**
    * Parse the command line arguments and initialize the data
    * <pre>
-   * % hadoop key create keyName [--size size] [--cipher algorithm]
-   *    [--provider providerPath]
-   * % hadoop key roll keyName [--provider providerPath]
+   * % hadoop key create keyName [-size size] [-cipher algorithm]
+   *    [-provider providerPath]
+   * % hadoop key roll keyName [-provider providerPath]
    * % hadoop key list [-provider providerPath]
-   * % hadoop key delete keyName [--provider providerPath] [-i]
+   * % hadoop key delete keyName [-provider providerPath] [-i]
    * </pre>
    * @param args Command line arguments.
    * @return 0 on success, 1 on failure.
@@ -107,47 +107,47 @@ public class KeyShell extends Configured implements Tool {
     for (int i = 0; i < args.length; i++) { // parse command line
       boolean moreTokens = (i < args.length - 1);
       if (args[i].equals("create")) {
-        String keyName = "--help";
+        String keyName = "-help";
         if (moreTokens) {
           keyName = args[++i];
         }
 
         command = new CreateCommand(keyName, options);
-        if ("--help".equals(keyName)) {
+        if ("-help".equals(keyName)) {
           printKeyShellUsage();
           return 1;
         }
       } else if (args[i].equals("delete")) {
-        String keyName = "--help";
+        String keyName = "-help";
         if (moreTokens) {
           keyName = args[++i];
         }
 
         command = new DeleteCommand(keyName);
-        if ("--help".equals(keyName)) {
+        if ("-help".equals(keyName)) {
           printKeyShellUsage();
           return 1;
         }
       } else if (args[i].equals("roll")) {
-        String keyName = "--help";
+        String keyName = "-help";
         if (moreTokens) {
           keyName = args[++i];
         }
 
         command = new RollCommand(keyName);
-        if ("--help".equals(keyName)) {
+        if ("-help".equals(keyName)) {
           printKeyShellUsage();
           return 1;
         }
       } else if ("list".equals(args[i])) {
         command = new ListCommand();
-      } else if ("--size".equals(args[i]) && moreTokens) {
+      } else if ("-size".equals(args[i]) && moreTokens) {
         options.setBitLength(Integer.parseInt(args[++i]));
-      } else if ("--cipher".equals(args[i]) && moreTokens) {
+      } else if ("-cipher".equals(args[i]) && moreTokens) {
         options.setCipher(args[++i]);
-      } else if ("--description".equals(args[i]) && moreTokens) {
+      } else if ("-description".equals(args[i]) && moreTokens) {
         options.setDescription(args[++i]);
-      } else if ("--attr".equals(args[i]) && moreTokens) {
+      } else if ("-attr".equals(args[i]) && moreTokens) {
         final String attrval[] = args[++i].split("=", 2);
         final String attr = attrval[0].trim();
         final String val = attrval[1].trim();
@@ -164,14 +164,14 @@ public class KeyShell extends Configured implements Tool {
           return 1;
         }
         attributes.put(attr, val);
-      } else if ("--provider".equals(args[i]) && moreTokens) {
+      } else if ("-provider".equals(args[i]) && moreTokens) {
         userSuppliedProvider = true;
         getConf().set(KeyProviderFactory.KEY_PROVIDER_PATH, args[++i]);
-      } else if ("--metadata".equals(args[i])) {
+      } else if ("-metadata".equals(args[i])) {
         getConf().setBoolean(LIST_METADATA, true);
-      } else if ("-i".equals(args[i]) || ("--interactive".equals(args[i]))) {
+      } else if ("-i".equals(args[i]) || ("-interactive".equals(args[i]))) {
         interactive = true;
-      } else if ("--help".equals(args[i])) {
+      } else if ("-help".equals(args[i])) {
         printKeyShellUsage();
         return 1;
       } else {
@@ -258,11 +258,11 @@ public class KeyShell extends Configured implements Tool {
 
 
   private class ListCommand extends Command {
     public static final String USAGE =
-        "list [--provider <provider>] [--metadata] [--help]";
+        "list [-provider <provider>] [-metadata] [-help]";
     public static final String DESC =
         "The list subcommand displays the keynames contained within\n" +
         "a particular provider as configured in core-site.xml or\n" +
-        "specified with the --provider argument. --metadata displays\n" +
+        "specified with the -provider argument. -metadata displays\n" +
         "the metadata.";
 
     private boolean metadata = false;
@@ -272,9 +272,9 @@ public class KeyShell extends Configured implements Tool {
       provider = getKeyProvider();
       if (provider == null) {
         out.println("There are no non-transient KeyProviders configured.\n"
-          + "Use the --provider option to specify a provider. If you\n"
+          + "Use the -provider option to specify a provider. If you\n"
           + "want to list a transient provider then you must use the\n"
-          + "--provider argument.");
+          + "-provider argument.");
         rc = false;
       }
       metadata = getConf().getBoolean(LIST_METADATA, false);
@@ -310,10 +310,10 @@ public class KeyShell extends Configured implements Tool {
   }
 
   private class RollCommand extends Command {
-    public static final String USAGE = "roll <keyname> [--provider <provider>] [--help]";
+    public static final String USAGE = "roll <keyname> [-provider <provider>] [-help]";
     public static final String DESC =
       "The roll subcommand creates a new version for the specified key\n" +
-      "within the provider indicated using the --provider argument\n";
+      "within the provider indicated using the -provider argument\n";
 
     String keyName = null;
 
@@ -326,13 +326,13 @@ public class KeyShell extends Configured implements Tool {
       provider = getKeyProvider();
       if (provider == null) {
         out.println("There are no valid KeyProviders configured. The key\n" +
-          "has not been rolled. Use the --provider option to specify\n" +
+          "has not been rolled. Use the -provider option to specify\n" +
           "a provider.");
         rc = false;
       }
       if (keyName == null) {
         out.println("Please provide a <keyname>.\n" +
-          "See the usage description by using --help.");
+          "See the usage description by using -help.");
         rc = false;
       }
       return rc;
@@ -367,11 +367,11 @@ public class KeyShell extends Configured implements Tool {
   }
 
   private class DeleteCommand extends Command {
-    public static final String USAGE = "delete <keyname> [--provider <provider>] [--help]";
+    public static final String USAGE = "delete <keyname> [-provider <provider>] [-help]";
     public static final String DESC =
         "The delete subcommand deletes all versions of the key\n" +
         "specified by the <keyname> argument from within the\n" +
-        "provider specified --provider.";
+        "provider specified -provider.";
 
     String keyName = null;
     boolean cont = true;
@@ -385,12 +385,12 @@ public class KeyShell extends Configured implements Tool {
       provider = getKeyProvider();
       if (provider == null) {
         out.println("There are no valid KeyProviders configured. Nothing\n"
-          + "was deleted. Use the --provider option to specify a provider.");
+          + "was deleted. Use the -provider option to specify a provider.");
         return false;
       }
       if (keyName == null) {
         out.println("There is no keyName specified. Please specify a " +
-            "<keyname>. See the usage description with --help.");
+            "<keyname>. See the usage description with -help.");
         return false;
       }
       if (interactive) {
@@ -436,19 +436,19 @@ public class KeyShell extends Configured implements Tool {
 
 
   private class CreateCommand extends Command {
     public static final String USAGE =
-      "create <keyname> [--cipher <cipher>] [--size <size>]\n" +
-      "                     [--description <description>]\n" +
-      "                     [--attr <attribute=value>]\n" +
-      "                     [--provider <provider>] [--help]";
+      "create <keyname> [-cipher <cipher>] [-size <size>]\n" +
+      "                     [-description <description>]\n" +
+      "                     [-attr <attribute=value>]\n" +
+      "                     [-provider <provider>] [-help]";
     public static final String DESC =
       "The create subcommand creates a new key for the name specified\n" +
       "by the <keyname> argument within the provider specified by the\n" +
-      "--provider argument. You may specify a cipher with the --cipher\n" +
+      "-provider argument. You may specify a cipher with the -cipher\n" +
       "argument. The default cipher is currently \"AES/CTR/NoPadding\".\n" +
       "The default keysize is 256. You may specify the requested key\n" +
-      "length using the --size argument. Arbitrary attribute=value\n" +
-      "style attributes may be specified using the --attr argument.\n" +
-      "--attr may be specified multiple times, once per attribute.\n";
+      "length using the -size argument. Arbitrary attribute=value\n" +
+      "style attributes may be specified using the -attr argument.\n" +
+      "-attr may be specified multiple times, once per attribute.\n";
 
     final String keyName;
     final Options options;
@@ -463,13 +463,13 @@ public class KeyShell extends Configured implements Tool {
       provider = getKeyProvider();
       if (provider == null) {
         out.println("There are no valid KeyProviders configured. No key\n" +
-          " was created. You can use the --provider option to specify\n" +
+          " was created. You can use the -provider option to specify\n" +
           " a provider to use.");
         rc = false;
       }
       if (keyName == null) {
         out.println("Please provide a <keyname>. See the usage description" +
-          " with --help.");
+          " with -help.");
         rc = false;
       }
       return rc;
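
Since KeyShell is a Tool, the new single-dash options above can also be exercised programmatically; a small hedged example (the jceks provider path is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyShell;
import org.apache.hadoop.util.ToolRunner;

public class KeyShellSingleDashDemo {
  public static void main(String[] args) throws Exception {
    // Same entry point as `hadoop key ...`, now using single-dash options.
    int rc = ToolRunner.run(new Configuration(), new KeyShell(), new String[] {
        "create", "demoKey",
        "-size", "256",
        "-provider", "jceks://file/tmp/demo.jceks"
    });
    System.exit(rc);
  }
}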

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java

@@ -653,7 +653,7 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension {
         encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
             .equals(KeyProviderCryptoExtension.EEK),
         "encryptedKey version name must be '%s', is '%s'",
-        KeyProviderCryptoExtension.EK,
+        KeyProviderCryptoExtension.EEK,
         encryptedKeyVersion.getEncryptedKeyVersion().getVersionName()
     );
     checkNotNull(encryptedKeyVersion.getEncryptedKeyVersion(), "encryptedKey");

+ 13 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.security.AccessControlException;
@@ -803,6 +804,18 @@ public abstract class AbstractFileSystem {
       throws AccessControlException, FileNotFoundException,
       UnresolvedLinkException, IOException;
 
+  /**
+   * The specification of this method matches that of
+   * {@link FileContext#access(Path, FsAction)}
+   * except that an UnresolvedLinkException may be thrown if a symlink is
+   * encountered in the path.
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
+  }
+
   /**
    * The specification of this method matches that of
    * {@link FileContext#getFileLinkStatus(Path)}

+ 50 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Options.CreateOpts;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT;
@@ -1108,6 +1109,55 @@ public final class FileContext {
     }.resolve(this, absF);
   }
 
+  /**
+   * Checks if the user can access a path.  The mode specifies which access
+   * checks to perform.  If the requested permissions are granted, then the
+   * method returns normally.  If access is denied, then the method throws an
+   * {@link AccessControlException}.
+   * <p/>
+   * The default implementation of this method calls {@link #getFileStatus(Path)}
+   * and checks the returned permissions against the requested permissions.
+   * Note that the getFileStatus call will be subject to authorization checks.
+   * Typically, this requires search (execute) permissions on each directory in
+   * the path's prefix, but this is implementation-defined.  Any file system
+   * that provides a richer authorization model (such as ACLs) may override the
+   * default implementation so that it checks against that model instead.
+   * <p>
+   * In general, applications should avoid using this method, due to the risk of
+   * time-of-check/time-of-use race conditions.  The permissions on a file may
+   * change immediately after the access call returns.  Most applications should
+   * prefer running specific file system actions as the desired user represented
+   * by a {@link UserGroupInformation}.
+   *
+   * @param path Path to check
+   * @param mode type of access to check
+   * @throws AccessControlException if access is denied
+   * @throws FileNotFoundException if the path does not exist
+   * @throws UnsupportedFileSystemException if file system for <code>path</code>
+   *   is not supported
+   * @throws IOException see specific implementation
+   * 
+   * Exceptions applicable to file systems accessed over RPC:
+   * @throws RpcClientException If an exception occurred in the RPC client
+   * @throws RpcServerException If an exception occurred in the RPC server
+   * @throws UnexpectedServerException If server implementation throws 
+   *           undeclared exception to RPC server
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(final Path path, final FsAction mode)
+      throws AccessControlException, FileNotFoundException,
+      UnsupportedFileSystemException, IOException {
+    final Path absPath = fixRelativePart(path);
+    new FSLinkResolver<Void>() {
+      @Override
+      public Void next(AbstractFileSystem fs, Path p) throws IOException,
+          UnresolvedLinkException {
+        fs.access(p, mode);
+        return null;
+      }
+    }.resolve(this, absPath);
+  }
+
   /**
    * Return a file status object that represents the path. If the path 
    * refers to a symlink then the FileStatus of the symlink is returned.
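
A brief usage sketch of the new FileContext#access check documented above; the path is illustrative, and the call simply throws AccessControlException when the requested FsAction is not granted to the current user.

import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class AccessCheckExample {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext();
    Path p = new Path("/tmp/data.txt");   // illustrative path
    try {
      // Throws if the current user lacks read+write permission on the path.
      fc.access(p, FsAction.READ_WRITE);
      System.out.println("access granted");
    } catch (AccessControlException ace) {
      System.out.println("access denied: " + ace.getMessage());
    }
  }
}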

+ 67 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -25,6 +25,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -50,6 +51,7 @@ import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.io.Text;
@@ -2072,6 +2074,71 @@ public abstract class FileSystem extends Configured implements Closeable {
    */
   public abstract FileStatus getFileStatus(Path f) throws IOException;
 
+  /**
+   * Checks if the user can access a path.  The mode specifies which access
+   * checks to perform.  If the requested permissions are granted, then the
+   * method returns normally.  If access is denied, then the method throws an
+   * {@link AccessControlException}.
+   * <p/>
+   * The default implementation of this method calls {@link #getFileStatus(Path)}
+   * and checks the returned permissions against the requested permissions.
+   * Note that the getFileStatus call will be subject to authorization checks.
+   * Typically, this requires search (execute) permissions on each directory in
+   * the path's prefix, but this is implementation-defined.  Any file system
+   * that provides a richer authorization model (such as ACLs) may override the
+   * default implementation so that it checks against that model instead.
+   * <p>
+   * In general, applications should avoid using this method, due to the risk of
+   * time-of-check/time-of-use race conditions.  The permissions on a file may
+   * change immediately after the access call returns.  Most applications should
+   * prefer running specific file system actions as the desired user represented
+   * by a {@link UserGroupInformation}.
+   *
+   * @param path Path to check
+   * @param mode type of access to check
+   * @throws AccessControlException if access is denied
+   * @throws FileNotFoundException if the path does not exist
+   * @throws IOException see specific implementation
+   */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    checkAccessPermissions(this.getFileStatus(path), mode);
+  }
+
+  /**
+   * This method provides the default implementation of
+   * {@link #access(Path, FsAction)}.
+   *
+   * @param stat FileStatus to check
+   * @param mode type of access to check
+   * @throws IOException for any error
+   */
+  @InterfaceAudience.Private
+  static void checkAccessPermissions(FileStatus stat, FsAction mode)
+      throws IOException {
+    FsPermission perm = stat.getPermission();
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    String user = ugi.getShortUserName();
+    List<String> groups = Arrays.asList(ugi.getGroupNames());
+    if (user.equals(stat.getOwner())) {
+      if (perm.getUserAction().implies(mode)) {
+        return;
+      }
+    } else if (groups.contains(stat.getGroup())) {
+      if (perm.getGroupAction().implies(mode)) {
+        return;
+      }
+    } else {
+      if (perm.getOtherAction().implies(mode)) {
+        return;
+      }
+    }
+    throw new AccessControlException(String.format(
+      "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat.getPath(),
+      stat.getOwner(), stat.getGroup(), stat.isDirectory() ? "d" : "-", perm));
+  }
+
   /**
    * See {@link FileContext#fixRelativePart}
    */

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
@@ -397,6 +398,12 @@ public class FilterFileSystem extends FileSystem {
     return fs.getFileStatus(f);
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    fs.access(path, mode);
+  }
+
   public void createSymlink(final Path target, final Path link,
       final boolean createParent) throws AccessControlException,
       FileAlreadyExistsException, FileNotFoundException,

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java

@@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.security.AccessControlException;
@@ -119,6 +120,13 @@ public abstract class FilterFs extends AbstractFileSystem {
     return myFs.getFileStatus(f);
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    checkPath(path);
+    myFs.access(path, mode);
+  }
+
   @Override
   public FileStatus getFileLinkStatus(final Path f) 
     throws IOException, UnresolvedLinkException {

+ 5 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java

@@ -118,7 +118,11 @@ class Delete {
         } catch(FileNotFoundException fnfe) {
           throw fnfe;
         } catch (IOException ioe) {
-            throw new IOException(ioe.getMessage() + ". Consider using -skipTrash option", ioe);
+          String msg = ioe.getMessage();
+          if (ioe.getCause() != null) {
+            msg += ": " + ioe.getCause().getMessage();
+	  }
+          throw new IOException(msg + ". Consider using -skipTrash option", ioe);
         }
       }
       return success;

+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java

@@ -41,7 +41,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 
 /**
@@ -222,6 +224,12 @@ class ChRootedFileSystem extends FilterFileSystem {
     return super.getFileStatus(fullPath(f));
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    super.access(fullPath(path), mode);
+  }
+
   @Override
   public FsStatus getStatus(Path p) throws IOException {
     return super.getStatus(fullPath(p));

+ 7 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java

@@ -41,7 +41,9 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Progressable;
 
@@ -200,6 +202,11 @@ class ChRootedFs extends AbstractFileSystem {
     return myFs.getFileStatus(fullPath(f));
   }
 
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    myFs.access(fullPath(path), mode);
+  }
+
   @Override
   public FileStatus getFileLinkStatus(final Path f) 
     throws IOException, UnresolvedLinkException {

+ 9 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.viewfs.InodeTree.INode;
 import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -359,7 +360,14 @@ public class ViewFileSystem extends FileSystem {
     return new ViewFsFileStatus(status, this.makeQualified(f));
   }
   
-  
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, IOException {
+    InodeTree.ResolveResult<FileSystem> res =
+      fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.access(res.remainingPath, mode);
+  }
+
   @Override
   public FileStatus[] listStatus(final Path f) throws AccessControlException,
       FileNotFoundException, IOException {

+ 9 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.local.LocalConfigKeys;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclUtil;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.viewfs.InodeTree.INode;
 import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
@@ -352,6 +353,14 @@ public class ViewFs extends AbstractFileSystem {
     return new ViewFsFileStatus(status, this.makeQualified(f));
   }
 
+  @Override
+  public void access(Path path, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+      fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.access(res.remainingPath, mode);
+  }
+
   @Override
   public FileStatus getFileLinkStatus(final Path f)
      throws AccessControlException, FileNotFoundException,

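All four wrappers above forward the new access(Path, FsAction) permission probe to the file system they wrap, so the check resolves against the actual target path. A minimal caller-side sketch of the API is below; the path and the default-FS setup are only illustrations, not part of this change set.

    import java.io.FileNotFoundException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    public class AccessCheckExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path p = new Path("/user/alice/data.txt");   // hypothetical path
        try {
          // Throws AccessControlException if the caller lacks READ permission,
          // FileNotFoundException if the path does not exist.
          fs.access(p, FsAction.READ);
          System.out.println("read access allowed on " + p);
        } catch (AccessControlException ace) {
          System.out.println("read access denied on " + p);
        } catch (FileNotFoundException fnfe) {
          System.out.println("path does not exist: " + p);
        }
      }
    }
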
+ 12 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/nativeio/NativeIO.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.io.SecureIOUtils.AlreadyExistsException;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.apache.hadoop.util.Shell;
@@ -823,6 +824,14 @@ public class NativeIO {
     }
   }
 
+  public static void link(File src, File dst) throws IOException {
+    if (!nativeLoaded) {
+      HardLink.createHardLink(src, dst);
+    } else {
+      link0(src.getAbsolutePath(), dst.getAbsolutePath());
+    }
+  }
+
   /**
    * A version of renameTo that throws a descriptive exception when it fails.
    *
@@ -833,4 +842,7 @@ public class NativeIO {
    */
   private static native void renameTo0(String src, String dst)
       throws NativeIOException;
+
+  private static native void link0(String src, String dst)
+      throws NativeIOException;
 }

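The new NativeIO.link helper uses the native link0 call when the native library is loaded and otherwise falls back to HardLink.createHardLink, so callers get hard-link semantics on every platform. A hedged usage sketch; the file names are placeholders.

    import java.io.File;
    import java.io.IOException;
    import org.apache.hadoop.io.nativeio.NativeIO;

    public class HardLinkExample {
      public static void main(String[] args) throws IOException {
        File src = new File("/tmp/source.dat");      // assumed to exist
        File dst = new File("/tmp/source-link.dat"); // must not exist yet
        // Creates dst as a hard link to src, via link()/CreateHardLink when
        // the native code is available, otherwise via HardLink.createHardLink.
        NativeIO.link(src, dst);
      }
    }
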
+ 2 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java

@@ -77,7 +77,8 @@ public class SecurityUtil {
    * For use only by tests and initialization
    */
   @InterfaceAudience.Private
-  static void setTokenServiceUseIp(boolean flag) {
+  @VisibleForTesting
+  public static void setTokenServiceUseIp(boolean flag) {
     useIpForTokenService = flag;
     hostResolver = !useIpForTokenService
         ? new QualifiedHostResolver()

+ 36 - 29
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/CredentialShell.java

@@ -67,11 +67,11 @@ public class CredentialShell extends Configured implements Tool {
       if (command.validate()) {
           command.execute();
       } else {
-        exitCode = -1;
+        exitCode = 1;
       }
     } catch (Exception e) {
       e.printStackTrace(err);
-      return -1;
+      return 1;
     }
     return exitCode;
   }
@@ -79,47 +79,54 @@ public class CredentialShell extends Configured implements Tool {
   /**
    * Parse the command line arguments and initialize the data
    * <pre>
-   * % hadoop alias create alias [--provider providerPath]
-   * % hadoop alias list [-provider providerPath]
-   * % hadoop alias delete alias [--provider providerPath] [-i]
+   * % hadoop credential create alias [-provider providerPath]
+   * % hadoop credential list [-provider providerPath]
+   * % hadoop credential delete alias [-provider providerPath] [-i]
    * </pre>
    * @param args
-   * @return
+   * @return 0 if the argument(s) were recognized, 1 otherwise
    * @throws IOException
    */
-  private int init(String[] args) throws IOException {
+  protected int init(String[] args) throws IOException {
+    // no args should print the help message
+    if (0 == args.length) {
+      printCredShellUsage();
+      ToolRunner.printGenericCommandUsage(System.err);
+      return 1;
+    }
+
     for (int i = 0; i < args.length; i++) { // parse command line
       if (args[i].equals("create")) {
         String alias = args[++i];
         command = new CreateCommand(alias);
-        if (alias.equals("--help")) {
+        if (alias.equals("-help")) {
           printCredShellUsage();
-          return -1;
+          return 0;
         }
       } else if (args[i].equals("delete")) {
         String alias = args[++i];
         command = new DeleteCommand(alias);
-        if (alias.equals("--help")) {
+        if (alias.equals("-help")) {
           printCredShellUsage();
-          return -1;
+          return 0;
         }
       } else if (args[i].equals("list")) {
         command = new ListCommand();
-      } else if (args[i].equals("--provider")) {
+      } else if (args[i].equals("-provider")) {
         userSuppliedProvider = true;
         getConf().set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, 
             args[++i]);
-      } else if (args[i].equals("-i") || (args[i].equals("--interactive"))) {
+      } else if (args[i].equals("-i") || (args[i].equals("-interactive"))) {
         interactive = true;
-      } else if (args[i].equals("-v") || (args[i].equals("--value"))) {
+      } else if (args[i].equals("-v") || (args[i].equals("-value"))) {
         value = args[++i];
-      } else if (args[i].equals("--help")) {
+      } else if (args[i].equals("-help")) {
         printCredShellUsage();
-        return -1;
+        return 0;
       } else {
         printCredShellUsage();
         ToolRunner.printGenericCommandUsage(System.err);
-        return -1;
+        return 1;
       }
     }
     return 0;
@@ -188,20 +195,20 @@ public class CredentialShell extends Configured implements Tool {
   }
 
   private class ListCommand extends Command {
-    public static final String USAGE = "list <alias> [--provider] [--help]";
+    public static final String USAGE = "list [-provider] [-help]";
     public static final String DESC =
         "The list subcommand displays the aliases contained within \n" +
         "a particular provider - as configured in core-site.xml or " +
-        "indicated\nthrough the --provider argument.";
+        "indicated\nthrough the -provider argument.";
 
     public boolean validate() {
       boolean rc = true;
       provider = getCredentialProvider();
       if (provider == null) {
         out.println("There are no non-transient CredentialProviders configured.\n"
-            + "Consider using the --provider option to indicate the provider\n"
+            + "Consider using the -provider option to indicate the provider\n"
             + "to use. If you want to list a transient provider then you\n"
-            + "you MUST use the --provider argument.");
+            + "you MUST use the -provider argument.");
         rc = false;
       }
       return rc;
@@ -229,11 +236,11 @@ public class CredentialShell extends Configured implements Tool {
   }
 
   private class DeleteCommand extends Command {
-    public static final String USAGE = "delete <alias> [--provider] [--help]";
+    public static final String USAGE = "delete <alias> [-provider] [-help]";
     public static final String DESC =
         "The delete subcommand deletes the credenital\n" +
         "specified as the <alias> argument from within the provider\n" +
-        "indicated through the --provider argument";
+        "indicated through the -provider argument";
 
     String alias = null;
     boolean cont = true;
@@ -248,13 +255,13 @@ public class CredentialShell extends Configured implements Tool {
       if (provider == null) {
         out.println("There are no valid CredentialProviders configured.\n"
             + "Nothing will be deleted.\n"
-            + "Consider using the --provider option to indicate the provider"
+            + "Consider using the -provider option to indicate the provider"
             + " to use.");
         return false;
       }
       if (alias == null) {
         out.println("There is no alias specified. Please provide the" +
-            "mandatory <alias>. See the usage description with --help.");
+            "mandatory <alias>. See the usage description with -help.");
         return false;
       }
       if (interactive) {
@@ -299,11 +306,11 @@ public class CredentialShell extends Configured implements Tool {
   }
 
   private class CreateCommand extends Command {
-    public static final String USAGE = "create <alias> [--provider] [--help]";
+    public static final String USAGE = "create <alias> [-provider] [-help]";
     public static final String DESC =
         "The create subcommand creates a new credential for the name specified\n" +
         "as the <alias> argument within the provider indicated through\n" +
-        "the --provider argument.";
+        "the -provider argument.";
 
     String alias = null;
 
@@ -317,13 +324,13 @@ public class CredentialShell extends Configured implements Tool {
       if (provider == null) {
         out.println("There are no valid CredentialProviders configured." +
         		"\nCredential will not be created.\n"
-            + "Consider using the --provider option to indicate the provider" +
+            + "Consider using the -provider option to indicate the provider" +
             " to use.");
         rc = false;
       }
       if (alias == null) {
         out.println("There is no alias specified. Please provide the" +
-        		"mandatory <alias>. See the usage description with --help.");
+            "mandatory <alias>. See the usage description with -help.");
         rc = false;
       }
       return rc;

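With the option prefix switched from double to single dash, the shell can be driven programmatically the same way the updated tests further below do. A hedged sketch; the provider URI and alias are examples only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.alias.CredentialShell;
    import org.apache.hadoop.util.ToolRunner;

    public class CredentialShellExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        String provider = "jceks://file/tmp/credstore.jceks"; // example store
        // Create a credential non-interactively, then list the aliases.
        int rc = ToolRunner.run(conf, new CredentialShell(),
            new String[] {"create", "mycred", "-value", "p@ssw0rd",
                          "-provider", provider});
        System.out.println("create exit code: " + rc);
        rc = ToolRunner.run(conf, new CredentialShell(),
            new String[] {"list", "-provider", provider});
        System.out.println("list exit code: " + rc);
      }
    }
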
+ 125 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Classpath.java

@@ -0,0 +1,125 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.shell.CommandFormat;
+import org.apache.hadoop.fs.shell.CommandFormat.UnknownOptionException;
+
+/**
+ * Command-line utility for getting the full classpath needed to launch a Hadoop
+ * client application.  If the hadoop script is called with "classpath" as the
+ * command, then it simply prints the classpath and exits immediately without
+ * launching a JVM.  The output likely will include wildcards in the classpath.
+ * If there are arguments passed to the classpath command, then this class gets
+ * called.  With the --glob argument, it prints the full classpath with wildcards
+ * expanded.  This is useful in situations where wildcard syntax isn't usable.
+ * With the --jar argument, it writes the classpath as a manifest in a jar file.
+ * This is useful in environments with short limitations on the maximum command
+ * line length, where it may not be possible to specify the full classpath in a
+ * command.  For example, the maximum command line length on Windows is 8191
+ * characters.
+ */
+@InterfaceAudience.Private
+public final class Classpath {
+  private static final String usage =
+    "classpath [--glob|--jar <path>|-h|--help] :\n"
+    + "  Prints the classpath needed to get the Hadoop jar and the required\n"
+    + "  libraries.\n"
+    + "  Options:\n"
+    + "\n"
+    + "  --glob       expand wildcards\n"
+    + "  --jar <path> write classpath as manifest in jar named <path>\n"
+    + "  -h, --help   print help\n";
+
+  /**
+   * Main entry point.
+   *
+   * @param args command-line arguments
+   */
+  public static void main(String[] args) {
+    if (args.length < 1 || args[0].equals("-h") || args[0].equals("--help")) {
+      System.out.println(usage);
+      return;
+    }
+
+    // Copy args, because CommandFormat mutates the list.
+    List<String> argsList = new ArrayList<String>(Arrays.asList(args));
+    CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "-glob", "-jar");
+    try {
+      cf.parse(argsList);
+    } catch (UnknownOptionException e) {
+      terminate(1, "unrecognized option");
+      return;
+    }
+
+    String classPath = System.getProperty("java.class.path");
+
+    if (cf.getOpt("-glob")) {
+      // The classpath returned from the property has been globbed already.
+      System.out.println(classPath);
+    } else if (cf.getOpt("-jar")) {
+      if (argsList.isEmpty() || argsList.get(0) == null ||
+          argsList.get(0).isEmpty()) {
+        terminate(1, "-jar option requires path of jar file to write");
+        return;
+      }
+
+      // Write the classpath into the manifest of a temporary jar file.
+      Path workingDir = new Path(System.getProperty("user.dir"));
+      final String tmpJarPath;
+      try {
+        tmpJarPath = FileUtil.createJarWithClassPath(classPath, workingDir,
+          System.getenv());
+      } catch (IOException e) {
+        terminate(1, "I/O error creating jar: " + e.getMessage());
+        return;
+      }
+
+      // Rename the temporary file to its final location.
+      String jarPath = argsList.get(0);
+      try {
+        FileUtil.replaceFile(new File(tmpJarPath), new File(jarPath));
+      } catch (IOException e) {
+        terminate(1, "I/O error renaming jar temporary file to path: " +
+          e.getMessage());
+        return;
+      }
+    }
+  }
+
+  /**
+   * Prints a message to stderr and exits with a status code.
+   *
+   * @param status exit code
+   * @param msg message
+   */
+  private static void terminate(int status, String msg) {
+    System.err.println(msg);
+    ExitUtil.terminate(status, msg);
+  }
+}

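The utility above is normally reached through the hadoop classpath command, but it can also be called directly, as the new TestClasspath further below does. A small sketch; the jar path is an example.

    import org.apache.hadoop.util.Classpath;

    public class ClasspathExample {
      public static void main(String[] args) {
        // Prints the JVM classpath with wildcards already expanded.
        Classpath.main(new String[] {"--glob"});
        // Writes the classpath into the manifest of the named jar file.
        Classpath.main(new String[] {"--jar", "/tmp/hadoop-classpath.jar"});
      }
    }
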
+ 14 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DiskChecker.java

@@ -78,6 +78,20 @@ public class DiskChecker {
            (mkdirsWithExistsCheck(new File(parent)) &&
                                       (canonDir.mkdir() || canonDir.exists()));
   }
+
+  /**
+   * Recurse down a directory tree, checking all child directories.
+   * @param dir
+   * @throws DiskErrorException
+   */
+  public static void checkDirs(File dir) throws DiskErrorException {
+    checkDir(dir);
+    for (File child : dir.listFiles()) {
+      if (child.isDirectory()) {
+        checkDirs(child);
+      }
+    }
+  }
   
   /**
    * Create the directory if it doesn't exist and check that dir is readable,

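checkDirs applies the existing checkDir validation to the given directory and then recurses into every subdirectory. A hedged caller sketch; the data directory is a placeholder.

    import java.io.File;
    import org.apache.hadoop.util.DiskChecker;
    import org.apache.hadoop.util.DiskChecker.DiskErrorException;

    public class DiskCheckExample {
      public static void main(String[] args) {
        File dataDir = new File("/data/hadoop/dn"); // example local directory
        try {
          // Recursively verifies the directory and all of its subdirectories.
          DiskChecker.checkDirs(dataDir);
          System.out.println(dataDir + " and its subdirectories look healthy");
        } catch (DiskErrorException e) {
          System.err.println("disk check failed: " + e.getMessage());
        }
      }
    }
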
+ 14 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java

@@ -27,6 +27,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Date;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedHashSet;
 import java.util.List;
@@ -377,6 +378,19 @@ public class StringUtils {
     return str.trim().split("\\s*,\\s*");
   }
 
+  /**
+   * Trims all the strings in a Collection<String> and returns a Set<String>.
+   * @param strings
+   * @return
+   */
+  public static Set<String> getTrimmedStrings(Collection<String> strings) {
+    Set<String> trimmedStrings = new HashSet<String>();
+    for (String string: strings) {
+      trimmedStrings.add(string.trim());
+    }
+    return trimmedStrings;
+  }
+
   final public static String[] emptyStringArray = {};
   final public static char COMMA = ',';
   final public static String COMMA_STR = ",";

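The new overload trims every element of a collection and returns them as a Set, so surrounding whitespace and duplicate entries disappear. A short sketch:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Set;
    import org.apache.hadoop.util.StringUtils;

    public class TrimmedStringsExample {
      public static void main(String[] args) {
        List<String> raw = Arrays.asList(" dn1.example.com ",
            "dn2.example.com", " dn1.example.com");
        // Whitespace is stripped and the repeated host collapses into one entry.
        Set<String> trimmed = StringUtils.getTrimmedStrings(raw);
        System.out.println(trimmed);
      }
    }
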
+ 37 - 0
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c

@@ -1054,6 +1054,43 @@ done:
 #endif
 }
 
+JNIEXPORT void JNICALL
+Java_org_apache_hadoop_io_nativeio_NativeIO_link0(JNIEnv *env,
+jclass clazz, jstring jsrc, jstring jdst)
+{
+#ifdef UNIX
+  const char *src = NULL, *dst = NULL;
+
+  src = (*env)->GetStringUTFChars(env, jsrc, NULL);
+  if (!src) goto done; // exception was thrown
+  dst = (*env)->GetStringUTFChars(env, jdst, NULL);
+  if (!dst) goto done; // exception was thrown
+  if (link(src, dst)) {
+    throw_ioe(env, errno);
+  }
+
+done:
+  if (src) (*env)->ReleaseStringUTFChars(env, jsrc, src);
+  if (dst) (*env)->ReleaseStringUTFChars(env, jdst, dst);
+#endif
+
+#ifdef WINDOWS
+  LPCTSTR src = NULL, dst = NULL;
+
+  src = (LPCTSTR) (*env)->GetStringChars(env, jsrc, NULL);
+  if (!src) goto done; // exception was thrown
+  dst = (LPCTSTR) (*env)->GetStringChars(env, jdst, NULL);
+  if (!dst) goto done; // exception was thrown
+  if (!CreateHardLink(dst, src, NULL)) {
+    throw_ioe(env, GetLastError());
+  }
+
+done:
+  if (src) (*env)->ReleaseStringChars(env, jsrc, src);
+  if (dst) (*env)->ReleaseStringChars(env, jdst, dst);
+#endif
+}
+
 JNIEXPORT jlong JNICALL
 Java_org_apache_hadoop_io_nativeio_NativeIO_getMemlockLimit0(
 JNIEnv *env, jclass clazz)

+ 17 - 2
hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm

@@ -296,9 +296,24 @@ User Commands
 * <<<classpath>>>
 
    Prints the class path needed to get the Hadoop jar and the required
-   libraries.
+   libraries.  If called without arguments, then prints the classpath set up by
+   the command scripts, which is likely to contain wildcards in the classpath
+   entries.  Additional options print the classpath after wildcard expansion or
+   write the classpath into the manifest of a jar file.  The latter is useful in
+   environments where wildcards cannot be used and the expanded classpath exceeds
+   the maximum supported command line length.
 
-   Usage: <<<hadoop classpath>>>
+   Usage: <<<hadoop classpath [--glob|--jar <path>|-h|--help]>>>
+
+*-----------------+-----------------------------------------------------------+
+|| COMMAND_OPTION || Description
+*-----------------+-----------------------------------------------------------+
+| --glob          | expand wildcards
+*-----------------+-----------------------------------------------------------+
+| --jar <path>    | write classpath as manifest in jar named <path>
+*-----------------+-----------------------------------------------------------+
+| -h, --help      | print help
+*-----------------+-----------------------------------------------------------+
 
 Administration Commands
 

+ 9 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderCryptoExtension.java

@@ -26,10 +26,10 @@ import javax.crypto.spec.IvParameterSpec;
 import javax.crypto.spec.SecretKeySpec;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-
 import static org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
@@ -118,8 +118,15 @@ public class TestKeyProviderCryptoExtension {
         new IvParameterSpec(KeyProviderCryptoExtension.EncryptedKeyVersion
             .deriveIV(encryptedKeyIv)));
     final byte[] manualMaterial = cipher.doFinal(encryptedKeyMaterial);
+
+    // Test the createForDecryption factory method
+    EncryptedKeyVersion eek2 =
+        EncryptedKeyVersion.createForDecryption(
+            eek.getEncryptionKeyVersionName(), eek.getEncryptedKeyIv(),
+            eek.getEncryptedKeyVersion().getMaterial());
+
     // Decrypt it with the API
-    KeyVersion decryptedKey = kpExt.decryptEncryptedKey(eek);
+    KeyVersion decryptedKey = kpExt.decryptEncryptedKey(eek2);
     final byte[] apiMaterial = decryptedKey.getMaterial();
 
     assertArrayEquals("Wrong key material from decryptEncryptedKey",

+ 24 - 24
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java

@@ -73,7 +73,7 @@ public class TestKeyShell {
   private void deleteKey(KeyShell ks, String keyName) throws Exception {
     int rc;
     outContent.reset();
-    final String[] delArgs = {"delete", keyName, "--provider", jceksProvider};
+    final String[] delArgs = {"delete", keyName, "-provider", jceksProvider};
     rc = ks.run(delArgs);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains(keyName + " has been " +
@@ -90,8 +90,8 @@ public class TestKeyShell {
   private String listKeys(KeyShell ks, boolean wantMetadata) throws Exception {
     int rc;
     outContent.reset();
-    final String[] listArgs = {"list", "--provider", jceksProvider };
-    final String[] listArgsM = {"list", "--metadata", "--provider", jceksProvider };
+    final String[] listArgs = {"list", "-provider", jceksProvider };
+    final String[] listArgsM = {"list", "-metadata", "-provider", jceksProvider };
     rc = ks.run(wantMetadata ? listArgsM : listArgs);
     assertEquals(0, rc);
     return outContent.toString();
@@ -106,7 +106,7 @@ public class TestKeyShell {
     ks.setConf(new Configuration());
 
     outContent.reset();
-    final String[] args1 = {"create", keyName, "--provider", jceksProvider};
+    final String[] args1 = {"create", keyName, "-provider", jceksProvider};
     rc = ks.run(args1);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains(keyName + " has been " +
@@ -121,7 +121,7 @@ public class TestKeyShell {
     assertTrue(listOut.contains("created"));
 
     outContent.reset();
-    final String[] args2 = {"roll", keyName, "--provider", jceksProvider};
+    final String[] args2 = {"roll", keyName, "-provider", jceksProvider};
     rc = ks.run(args2);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("key1 has been successfully " +
@@ -137,8 +137,8 @@ public class TestKeyShell {
   @Test
   public void testKeySuccessfulCreationWithDescription() throws Exception {
     outContent.reset();
-    final String[] args1 = {"create", "key1", "--provider", jceksProvider,
-                      "--description", "someDescription"};
+    final String[] args1 = {"create", "key1", "-provider", jceksProvider,
+                      "-description", "someDescription"};
     int rc = 0;
     KeyShell ks = new KeyShell();
     ks.setConf(new Configuration());
@@ -154,7 +154,7 @@ public class TestKeyShell {
 
   @Test
   public void testInvalidKeySize() throws Exception {
-    final String[] args1 = {"create", "key1", "--size", "56", "--provider",
+    final String[] args1 = {"create", "key1", "-size", "56", "-provider",
             jceksProvider};
 
     int rc = 0;
@@ -167,7 +167,7 @@ public class TestKeyShell {
 
   @Test
   public void testInvalidCipher() throws Exception {
-    final String[] args1 = {"create", "key1", "--cipher", "LJM", "--provider",
+    final String[] args1 = {"create", "key1", "-cipher", "LJM", "-provider",
             jceksProvider};
 
     int rc = 0;
@@ -180,7 +180,7 @@ public class TestKeyShell {
 
   @Test
   public void testInvalidProvider() throws Exception {
-    final String[] args1 = {"create", "key1", "--cipher", "AES", "--provider",
+    final String[] args1 = {"create", "key1", "-cipher", "AES", "-provider",
       "sdff://file/tmp/keystore.jceks"};
     
     int rc = 0;
@@ -194,7 +194,7 @@ public class TestKeyShell {
 
   @Test
   public void testTransientProviderWarning() throws Exception {
-    final String[] args1 = {"create", "key1", "--cipher", "AES", "--provider",
+    final String[] args1 = {"create", "key1", "-cipher", "AES", "-provider",
       "user:///"};
     
     int rc = 0;
@@ -224,8 +224,8 @@ public class TestKeyShell {
   @Test
   public void testFullCipher() throws Exception {
     final String keyName = "key1";
-    final String[] args1 = {"create", keyName, "--cipher", "AES/CBC/pkcs5Padding",
-        "--provider", jceksProvider};
+    final String[] args1 = {"create", keyName, "-cipher", "AES/CBC/pkcs5Padding",
+        "-provider", jceksProvider};
     
     int rc = 0;
     KeyShell ks = new KeyShell();
@@ -245,8 +245,8 @@ public class TestKeyShell {
     ks.setConf(new Configuration());
 
     /* Simple creation test */
-    final String[] args1 = {"create", "keyattr1", "--provider", jceksProvider,
-            "--attr", "foo=bar"};
+    final String[] args1 = {"create", "keyattr1", "-provider", jceksProvider,
+            "-attr", "foo=bar"};
     rc = ks.run(args1);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("keyattr1 has been " +
@@ -259,8 +259,8 @@ public class TestKeyShell {
 
     /* Negative tests: no attribute */
     outContent.reset();
-    final String[] args2 = {"create", "keyattr2", "--provider", jceksProvider,
-            "--attr", "=bar"};
+    final String[] args2 = {"create", "keyattr2", "-provider", jceksProvider,
+            "-attr", "=bar"};
     rc = ks.run(args2);
     assertEquals(1, rc);
 
@@ -288,10 +288,10 @@ public class TestKeyShell {
 
     /* Test several attrs together... */
     outContent.reset();
-    final String[] args3 = {"create", "keyattr3", "--provider", jceksProvider,
-            "--attr", "foo = bar",
-            "--attr", " glarch =baz  ",
-            "--attr", "abc=def"};
+    final String[] args3 = {"create", "keyattr3", "-provider", jceksProvider,
+            "-attr", "foo = bar",
+            "-attr", " glarch =baz  ",
+            "-attr", "abc=def"};
     rc = ks.run(args3);
     assertEquals(0, rc);
 
@@ -304,9 +304,9 @@ public class TestKeyShell {
 
     /* Negative test - repeated attributes should fail */
     outContent.reset();
-    final String[] args4 = {"create", "keyattr4", "--provider", jceksProvider,
-            "--attr", "foo=bar",
-            "--attr", "foo=glarch"};
+    final String[] args4 = {"create", "keyattr4", "-provider", jceksProvider,
+            "-attr", "foo=bar",
+            "-attr", "foo=glarch"};
     rc = ks.run(args4);
     assertEquals(1, rc);
 

+ 3 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java

@@ -23,6 +23,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
@@ -201,6 +202,8 @@ public class TestHarFileSystem {
     public void removeXAttr(Path path, String name) throws IOException;
 
     public AclStatus getAclStatus(Path path) throws IOException;
+
+    public void access(Path path, FsAction mode) throws IOException;
   }
 
   @Test

+ 34 - 15
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/alias/TestCredShell.java

@@ -17,16 +17,18 @@
  */
 package org.apache.hadoop.security.alias;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.PrintStream;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.alias.CredentialShell.PasswordReader;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -45,7 +47,7 @@ public class TestCredShell {
   @Test
   public void testCredentialSuccessfulLifecycle() throws Exception {
     outContent.reset();
-    String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider", 
+    String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     int rc = 0;
     CredentialShell cs = new CredentialShell();
@@ -56,14 +58,14 @@ public class TestCredShell {
     		"created."));
 
     outContent.reset();
-    String[] args2 = {"list", "--provider", 
+    String[] args2 = {"list", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     rc = cs.run(args2);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("credential1"));
 
     outContent.reset();
-    String[] args4 = {"delete", "credential1", "--provider", 
+    String[] args4 = {"delete", "credential1", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     rc = cs.run(args4);
     assertEquals(0, rc);
@@ -71,7 +73,7 @@ public class TestCredShell {
     		"deleted."));
 
     outContent.reset();
-    String[] args5 = {"list", "--provider", 
+    String[] args5 = {"list", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     rc = cs.run(args5);
     assertEquals(0, rc);
@@ -80,21 +82,21 @@ public class TestCredShell {
 
   @Test
   public void testInvalidProvider() throws Exception {
-    String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider", 
+    String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider",
       "sdff://file/tmp/credstore.jceks"};
     
     int rc = 0;
     CredentialShell cs = new CredentialShell();
     cs.setConf(new Configuration());
     rc = cs.run(args1);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
     assertTrue(outContent.toString().contains("There are no valid " +
     		"CredentialProviders configured."));
   }
 
   @Test
   public void testTransientProviderWarning() throws Exception {
-    String[] args1 = {"create", "credential1", "--value", "p@ssw0rd", "--provider", 
+    String[] args1 = {"create", "credential1", "-value", "p@ssw0rd", "-provider",
       "user:///"};
     
     int rc = 0;
@@ -105,7 +107,7 @@ public class TestCredShell {
     assertTrue(outContent.toString().contains("WARNING: you are modifying a " +
     		"transient provider."));
 
-    String[] args2 = {"delete", "credential1", "--provider", "user:///"};
+    String[] args2 = {"delete", "credential1", "-provider", "user:///"};
     rc = cs.run(args2);
     assertEquals(outContent.toString(), 0, rc);
     assertTrue(outContent.toString().contains("credential1 has been successfully " +
@@ -122,14 +124,14 @@ public class TestCredShell {
     config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, "user:///");
     cs.setConf(config);
     rc = cs.run(args1);
-    assertEquals(-1, rc);
+    assertEquals(1, rc);
     assertTrue(outContent.toString().contains("There are no valid " +
     		"CredentialProviders configured."));
   }
   
   @Test
   public void testPromptForCredentialWithEmptyPasswd() throws Exception {
-    String[] args1 = {"create", "credential1", "--provider", 
+    String[] args1 = {"create", "credential1", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     ArrayList<String> passwords = new ArrayList<String>();
     passwords.add(null);
@@ -139,13 +141,13 @@ public class TestCredShell {
     shell.setConf(new Configuration());
     shell.setPasswordReader(new MockPasswordReader(passwords));
     rc = shell.run(args1);
-    assertEquals(outContent.toString(), -1, rc);
+    assertEquals(outContent.toString(), 1, rc);
     assertTrue(outContent.toString().contains("Passwords don't match"));
   }
 
   @Test
   public void testPromptForCredential() throws Exception {
-    String[] args1 = {"create", "credential1", "--provider", 
+    String[] args1 = {"create", "credential1", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     ArrayList<String> passwords = new ArrayList<String>();
     passwords.add("p@ssw0rd");
@@ -159,7 +161,7 @@ public class TestCredShell {
     assertTrue(outContent.toString().contains("credential1 has been successfully " +
         "created."));
     
-    String[] args2 = {"delete", "credential1", "--provider", 
+    String[] args2 = {"delete", "credential1", "-provider",
         "jceks://file" + tmpDir + "/credstore.jceks"};
     rc = shell.run(args2);
     assertEquals(0, rc);
@@ -186,4 +188,21 @@ public class TestCredShell {
       System.out.println(message);
     }
   }
+
+  @Test
+  public void testEmptyArgList() throws Exception {
+    CredentialShell shell = new CredentialShell();
+    shell.setConf(new Configuration());
+    assertEquals(1, shell.init(new String[0]));
+  }
+
+  @Test
+  public void testCommandHelpExitsNormally() throws Exception {
+    for (String cmd : Arrays.asList("create", "list", "delete")) {
+      CredentialShell shell = new CredentialShell();
+      shell.setConf(new Configuration());
+      assertEquals("Expected help argument on " + cmd + " to return 0",
+              0, shell.init(new String[] {cmd, "-help"}));
+    }
+  }
 }

+ 176 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java

@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import static org.junit.Assert.*;
+
+import java.io.ByteArrayOutputStream;
+import java.io.File;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.nio.charset.Charset;
+import java.util.jar.Attributes;
+import java.util.jar.JarFile;
+import java.util.jar.Manifest;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.io.IOUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+/**
+ * Tests covering the classpath command-line utility.
+ */
+public class TestClasspath {
+
+  private static final Log LOG = LogFactory.getLog(TestClasspath.class);
+  private static final File TEST_DIR = new File(
+    System.getProperty("test.build.data", "/tmp"), "TestClasspath");
+  private static final Charset UTF8 = Charset.forName("UTF-8");
+
+  static {
+    ExitUtil.disableSystemExit();
+  }
+
+  private PrintStream oldStdout, oldStderr;
+  private ByteArrayOutputStream stdout, stderr;
+  private PrintStream printStdout, printStderr;
+
+  @Before
+  public void setUp() {
+    assertTrue(FileUtil.fullyDelete(TEST_DIR));
+    assertTrue(TEST_DIR.mkdirs());
+    oldStdout = System.out;
+    oldStderr = System.err;
+
+    stdout = new ByteArrayOutputStream();
+    printStdout = new PrintStream(stdout);
+    System.setOut(printStdout);
+
+    stderr = new ByteArrayOutputStream();
+    printStderr = new PrintStream(stderr);
+    System.setErr(printStderr);
+  }
+
+  @After
+  public void tearDown() {
+    System.setOut(oldStdout);
+    System.setErr(oldStderr);
+    IOUtils.cleanup(LOG, printStdout, printStderr);
+    assertTrue(FileUtil.fullyDelete(TEST_DIR));
+  }
+
+  @Test
+  public void testGlob() {
+    Classpath.main(new String[] { "--glob" });
+    String strOut = new String(stdout.toByteArray(), UTF8);
+    assertEquals(System.getProperty("java.class.path"), strOut.trim());
+    assertTrue(stderr.toByteArray().length == 0);
+  }
+
+  @Test
+  public void testJar() throws IOException {
+    File file = new File(TEST_DIR, "classpath.jar");
+    Classpath.main(new String[] { "--jar", file.getAbsolutePath() });
+    assertTrue(stdout.toByteArray().length == 0);
+    assertTrue(stderr.toByteArray().length == 0);
+    assertTrue(file.exists());
+    assertJar(file);
+  }
+
+  @Test
+  public void testJarReplace() throws IOException {
+    // Run the command twice with the same output jar file, and expect success.
+    testJar();
+    testJar();
+  }
+
+  @Test
+  public void testJarFileMissing() throws IOException {
+    try {
+      Classpath.main(new String[] { "--jar" });
+      fail("expected exit");
+    } catch (ExitUtil.ExitException e) {
+      assertTrue(stdout.toByteArray().length == 0);
+      String strErr = new String(stderr.toByteArray(), UTF8);
+      assertTrue(strErr.contains("requires path of jar"));
+    }
+  }
+
+  @Test
+  public void testHelp() {
+    Classpath.main(new String[] { "--help" });
+    String strOut = new String(stdout.toByteArray(), UTF8);
+    assertTrue(strOut.contains("Prints the classpath"));
+    assertTrue(stderr.toByteArray().length == 0);
+  }
+
+  @Test
+  public void testHelpShort() {
+    Classpath.main(new String[] { "-h" });
+    String strOut = new String(stdout.toByteArray(), UTF8);
+    assertTrue(strOut.contains("Prints the classpath"));
+    assertTrue(stderr.toByteArray().length == 0);
+  }
+
+  @Test
+  public void testUnrecognized() {
+    try {
+      Classpath.main(new String[] { "--notarealoption" });
+      fail("expected exit");
+    } catch (ExitUtil.ExitException e) {
+      assertTrue(stdout.toByteArray().length == 0);
+      String strErr = new String(stderr.toByteArray(), UTF8);
+      assertTrue(strErr.contains("unrecognized option"));
+    }
+  }
+
+  /**
+   * Asserts that the specified file is a jar file with a manifest containing a
+   * non-empty classpath attribute.
+   *
+   * @param file File to check
+   * @throws IOException if there is an I/O error
+   */
+  private static void assertJar(File file) throws IOException {
+    JarFile jarFile = null;
+    try {
+      jarFile = new JarFile(file);
+      Manifest manifest = jarFile.getManifest();
+      assertNotNull(manifest);
+      Attributes mainAttributes = manifest.getMainAttributes();
+      assertNotNull(mainAttributes);
+      assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH));
+      String classPathAttr = mainAttributes.getValue(Attributes.Name.CLASS_PATH);
+      assertNotNull(classPathAttr);
+      assertFalse(classPathAttr.isEmpty());
+    } finally {
+      // It's too bad JarFile doesn't implement Closeable.
+      if (jarFile != null) {
+        try {
+          jarFile.close();
+        } catch (IOException e) {
+          LOG.warn("exception closing jarFile: " + jarFile, e);
+        }
+      }
+    }
+  }
+}

+ 51 - 33
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.crypto.key.KeyProvider;
+import org.apache.hadoop.crypto.key.KeyProvider.KeyVersion;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
@@ -27,7 +28,6 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.crypto.key.kms.KMSClientProvider;
-import org.apache.hadoop.util.StringUtils;
 
 import javax.ws.rs.Consumes;
 import javax.ws.rs.DELETE;
@@ -59,22 +59,25 @@ import java.util.Map;
 @Path(KMSRESTConstants.SERVICE_VERSION)
 @InterfaceAudience.Private
 public class KMS {
-  private static final String CREATE_KEY = "CREATE_KEY";
-  private static final String DELETE_KEY = "DELETE_KEY";
-  private static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION";
-  private static final String GET_KEYS = "GET_KEYS";
-  private static final String GET_KEYS_METADATA = "GET_KEYS_METADATA";
-  private static final String GET_KEY_VERSION = "GET_KEY_VERSION";
-  private static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
-  private static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
-  private static final String GET_METADATA = "GET_METADATA";
-  private static final String GENERATE_EEK = "GENERATE_EEK";
-  private static final String DECRYPT_EEK = "DECRYPT_EEK";
-
+  public static final String CREATE_KEY = "CREATE_KEY";
+  public static final String DELETE_KEY = "DELETE_KEY";
+  public static final String ROLL_NEW_VERSION = "ROLL_NEW_VERSION";
+  public static final String GET_KEYS = "GET_KEYS";
+  public static final String GET_KEYS_METADATA = "GET_KEYS_METADATA";
+  public static final String GET_KEY_VERSIONS = "GET_KEY_VERSIONS";
+  public static final String GET_METADATA = "GET_METADATA";
+
+  public static final String GET_KEY_VERSION = "GET_KEY_VERSION";
+  public static final String GET_CURRENT_KEY = "GET_CURRENT_KEY";
+  public static final String GENERATE_EEK = "GENERATE_EEK";
+  public static final String DECRYPT_EEK = "DECRYPT_EEK";
+  
   private KeyProviderCryptoExtension provider;
+  private KMSAudit kmsAudit;
 
   public KMS() throws Exception {
     provider = KMSWebApp.getKeyProvider();
+    kmsAudit= KMSWebApp.getKMSAudit();
   }
 
   private static Principal getPrincipal(SecurityContext securityContext)
@@ -86,13 +89,26 @@ public class KMS {
     return user;
   }
 
-  private static void assertAccess(KMSACLs.Type aclType, Principal principal,
+
+  private static final String UNAUTHORIZED_MSG_WITH_KEY = 
+      "User:{0} not allowed to do ''{1}'' on ''{2}''";
+  
+  private static final String UNAUTHORIZED_MSG_WITHOUT_KEY = 
+      "User:{0} not allowed to do ''{1}''";
+
+  private void assertAccess(KMSACLs.Type aclType, Principal principal,
+      String operation) throws AccessControlException {
+    assertAccess(aclType, principal, operation, null);
+  }
+
+  private void assertAccess(KMSACLs.Type aclType, Principal principal,
       String operation, String key) throws AccessControlException {
     if (!KMSWebApp.getACLs().hasAccess(aclType, principal.getName())) {
       KMSWebApp.getUnauthorizedCallsMeter().mark();
-      KMSAudit.unauthorized(principal, operation, key);
+      kmsAudit.unauthorized(principal, operation, key);
       throw new AuthorizationException(MessageFormat.format(
-          "User:{0} not allowed to do ''{1}'' on ''{2}''",
+          (key != null) ? UNAUTHORIZED_MSG_WITH_KEY
+                        : UNAUTHORIZED_MSG_WITHOUT_KEY,
           principal.getName(), operation, key));
     }
   }
@@ -149,7 +165,7 @@ public class KMS {
 
     provider.flush();
 
-    KMSAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" +
+    kmsAudit.ok(user, CREATE_KEY, name, "UserProvidedMaterial:" +
         (material != null) + " Description:" + description);
 
     if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@@ -175,7 +191,7 @@ public class KMS {
     provider.deleteKey(name);
     provider.flush();
 
-    KMSAudit.ok(user, DELETE_KEY, name, "");
+    kmsAudit.ok(user, DELETE_KEY, name, "");
 
     return Response.ok().build();
   }
@@ -203,7 +219,7 @@ public class KMS {
 
     provider.flush();
 
-    KMSAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
+    kmsAudit.ok(user, ROLL_NEW_VERSION, name, "UserProvidedMaterial:" +
         (material != null) + " NewVersion:" + keyVersion.getVersionName());
 
     if (!KMSWebApp.getACLs().hasAccess(KMSACLs.Type.GET, user.getName())) {
@@ -222,11 +238,10 @@ public class KMS {
     KMSWebApp.getAdminCallsMeter().mark();
     Principal user = getPrincipal(securityContext);
     String[] keyNames = keyNamesList.toArray(new String[keyNamesList.size()]);
-    String names = StringUtils.arrayToString(keyNames);
-    assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA, names);
+    assertAccess(KMSACLs.Type.GET_METADATA, user, GET_KEYS_METADATA);
     KeyProvider.Metadata[] keysMeta = provider.getKeysMetadata(keyNames);
     Object json = KMSServerJSONUtils.toJSON(keyNames, keysMeta);
-    KMSAudit.ok(user, GET_KEYS_METADATA, names, "");
+    kmsAudit.ok(user, GET_KEYS_METADATA, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -237,9 +252,9 @@ public class KMS {
       throws Exception {
     KMSWebApp.getAdminCallsMeter().mark();
     Principal user = getPrincipal(securityContext);
-    assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS, "*");
+    assertAccess(KMSACLs.Type.GET_KEYS, user, GET_KEYS);
     Object json = provider.getKeys();
-    KMSAudit.ok(user, GET_KEYS, "*", "");
+    kmsAudit.ok(user, GET_KEYS, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -263,7 +278,7 @@ public class KMS {
     KMSWebApp.getAdminCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET_METADATA, user, GET_METADATA, name);
     Object json = KMSServerJSONUtils.toJSON(name, provider.getMetadata(name));
-    KMSAudit.ok(user, GET_METADATA, name, "");
+    kmsAudit.ok(user, GET_METADATA, name, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -279,7 +294,7 @@ public class KMS {
     KMSWebApp.getKeyCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET, user, GET_CURRENT_KEY, name);
     Object json = KMSServerJSONUtils.toJSON(provider.getCurrentKey(name));
-    KMSAudit.ok(user, GET_CURRENT_KEY, name, "");
+    kmsAudit.ok(user, GET_CURRENT_KEY, name, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -292,9 +307,12 @@ public class KMS {
     Principal user = getPrincipal(securityContext);
     KMSClientProvider.checkNotEmpty(versionName, "versionName");
     KMSWebApp.getKeyCallsMeter().mark();
-    assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION, versionName);
-    Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersion(versionName));
-    KMSAudit.ok(user, GET_KEY_VERSION, versionName, "");
+    KeyVersion keyVersion = provider.getKeyVersion(versionName);
+    assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSION);
+    if (keyVersion != null) {
+      kmsAudit.ok(user, GET_KEY_VERSION, keyVersion.getName(), "");
+    }
+    Object json = KMSServerJSONUtils.toJSON(keyVersion);
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 
@@ -327,7 +345,7 @@ public class KMS {
       } catch (Exception e) {
         throw new IOException(e);
       }
-      KMSAudit.ok(user, GENERATE_EEK, name, "");
+      kmsAudit.ok(user, GENERATE_EEK, name, "");
       retJSON = new ArrayList();
       for (EncryptedKeyVersion edek : retEdeks) {
         ((ArrayList)retJSON).add(KMSServerJSONUtils.toJSON(edek));
@@ -362,7 +380,7 @@ public class KMS {
         (String) jsonPayload.get(KMSRESTConstants.MATERIAL_FIELD);
     Object retJSON;
     if (eekOp.equals(KMSRESTConstants.EEK_DECRYPT)) {
-      assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, versionName);
+      assertAccess(KMSACLs.Type.DECRYPT_EEK, user, DECRYPT_EEK, keyName);
       KMSClientProvider.checkNotNull(ivStr, KMSRESTConstants.IV_FIELD);
       byte[] iv = Base64.decodeBase64(ivStr);
       KMSClientProvider.checkNotNull(encMaterialStr,
@@ -373,7 +391,7 @@ public class KMS {
               new KMSClientProvider.KMSEncryptedKeyVersion(keyName, versionName,
                   iv, KeyProviderCryptoExtension.EEK, encMaterial));
       retJSON = KMSServerJSONUtils.toJSON(retKeyVersion);
-      KMSAudit.ok(user, DECRYPT_EEK, versionName, "");
+      kmsAudit.ok(user, DECRYPT_EEK, keyName, "");
     } else {
       throw new IllegalArgumentException("Wrong " + KMSRESTConstants.EEK_OP +
           " value, it must be " + KMSRESTConstants.EEK_GENERATE + " or " +
@@ -396,7 +414,7 @@ public class KMS {
     KMSWebApp.getKeyCallsMeter().mark();
     assertAccess(KMSACLs.Type.GET, user, GET_KEY_VERSIONS, name);
     Object json = KMSServerJSONUtils.toJSON(provider.getKeyVersions(name));
-    KMSAudit.ok(user, GET_KEY_VERSIONS, name, "");
+    kmsAudit.ok(user, GET_KEY_VERSIONS, name, "");
     return Response.ok().type(MediaType.APPLICATION_JSON).entity(json).build();
   }
 

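A side note on the unauthorized-message constants introduced in KMS.java above: java.text.MessageFormat treats a single quote as an escape character, so the doubled quotes (''{1}'') are what produce literal quotes around the operation and key in the rendered message. A minimal, self-contained sketch of that behaviour (the class name and sample values are illustrative and not part of this commit):

  import java.text.MessageFormat;

  public class UnauthorizedMessageDemo {
    // Same patterns as UNAUTHORIZED_MSG_WITH_KEY / UNAUTHORIZED_MSG_WITHOUT_KEY above.
    static final String WITH_KEY = "User:{0} not allowed to do ''{1}'' on ''{2}''";
    static final String WITHOUT_KEY = "User:{0} not allowed to do ''{1}''";

    public static void main(String[] args) {
      // Prints: User:alice not allowed to do 'DECRYPT_EEK' on 'key1'
      System.out.println(MessageFormat.format(WITH_KEY, "alice", "DECRYPT_EEK", "key1"));
      // Arguments not referenced by the pattern are ignored, so the null key is harmless here.
      // Prints: User:alice not allowed to do 'GET_KEYS'
      System.out.println(MessageFormat.format(WITHOUT_KEY, "alice", "GET_KEYS", null));
    }
  }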
+ 174 - 15
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java

@@ -20,43 +20,202 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import com.google.common.base.Joiner;
+import com.google.common.base.Strings;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.common.cache.RemovalNotification;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 import java.security.Principal;
 import java.security.Principal;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
 
 
 /**
 /**
  * Provides convenience methods for audit logging consistently the different
  * Provides convenience methods for audit logging consistently the different
  * types of events.
  * types of events.
  */
  */
 public class KMSAudit {
 public class KMSAudit {
+
+  private static class AuditEvent {
+    private final AtomicLong accessCount = new AtomicLong(-1);
+    private final String keyName;
+    private final String user;
+    private final String op;
+    private final String extraMsg;
+    private final long startTime = System.currentTimeMillis();
+
+    private AuditEvent(String keyName, String user, String op, String msg) {
+      this.keyName = keyName;
+      this.user = user;
+      this.op = op;
+      this.extraMsg = msg;
+    }
+
+    public String getExtraMsg() {
+      return extraMsg;
+    }
+
+    public AtomicLong getAccessCount() {
+      return accessCount;
+    }
+
+    public String getKeyName() {
+      return keyName;
+    }
+
+    public String getUser() {
+      return user;
+    }
+
+    public String getOp() {
+      return op;
+    }
+
+    public long getStartTime() {
+      return startTime;
+    }
+  }
+
+  public static enum OpStatus {
+    OK, UNAUTHORIZED, UNAUTHENTICATED, ERROR;
+  }
+
+  private static Set<String> AGGREGATE_OPS_WHITELIST = Sets.newHashSet(
+    KMS.GET_KEY_VERSION, KMS.GET_CURRENT_KEY, KMS.DECRYPT_EEK, KMS.GENERATE_EEK
+  );
+
+  private Cache<String, AuditEvent> cache;
+
+  private ScheduledExecutorService executor;
+
   public static final String KMS_LOGGER_NAME = "kms-audit";
   public static final String KMS_LOGGER_NAME = "kms-audit";
 
 
   private static Logger AUDIT_LOG = LoggerFactory.getLogger(KMS_LOGGER_NAME);
   private static Logger AUDIT_LOG = LoggerFactory.getLogger(KMS_LOGGER_NAME);
 
 
-  private static void op(String status, String op, Principal user, String key,
-      String extraMsg) {
-    AUDIT_LOG.info("Status:{} User:{} Op:{} Name:{}{}", status, user.getName(),
-        op, key, extraMsg);
+  KMSAudit(long delay) {
+    cache = CacheBuilder.newBuilder()
+        .expireAfterWrite(delay, TimeUnit.MILLISECONDS)
+        .removalListener(
+            new RemovalListener<String, AuditEvent>() {
+              @Override
+              public void onRemoval(
+                  RemovalNotification<String, AuditEvent> entry) {
+                AuditEvent event = entry.getValue();
+                if (event.getAccessCount().get() > 0) {
+                  KMSAudit.this.logEvent(event);
+                  event.getAccessCount().set(0);
+                  KMSAudit.this.cache.put(entry.getKey(), event);
+                }
+              }
+            }).build();
+    executor = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
+        .setDaemon(true).setNameFormat(KMS_LOGGER_NAME + "_thread").build());
+    executor.scheduleAtFixedRate(new Runnable() {
+      @Override
+      public void run() {
+        cache.cleanUp();
+      }
+    }, delay / 10, delay / 10, TimeUnit.MILLISECONDS);
+  }
+
+  private void logEvent(AuditEvent event) {
+    AUDIT_LOG.info(
+        "OK[op={}, key={}, user={}, accessCount={}, interval={}ms] {}",
+        event.getOp(), event.getKeyName(), event.getUser(),
+        event.getAccessCount().get(),
+        (System.currentTimeMillis() - event.getStartTime()),
+        event.getExtraMsg());
+  }
+
+  private void op(OpStatus opStatus, final String op, final String user,
+      final String key, final String extraMsg) {
+    if (!Strings.isNullOrEmpty(user) && !Strings.isNullOrEmpty(key)
+        && !Strings.isNullOrEmpty(op)
+        && AGGREGATE_OPS_WHITELIST.contains(op)) {
+      String cacheKey = createCacheKey(user, key, op);
+      if (opStatus == OpStatus.UNAUTHORIZED) {
+        cache.invalidate(cacheKey);
+        AUDIT_LOG.info("UNAUTHORIZED[op={}, key={}, user={}] {}", op, key, user,
+            extraMsg);
+      } else {
+        try {
+          AuditEvent event = cache.get(cacheKey, new Callable<AuditEvent>() {
+            @Override
+            public AuditEvent call() throws Exception {
+              return new AuditEvent(key, user, op, extraMsg);
+            }
+          });
+          // Log first access (initialized as -1 so
+          // incrementAndGet() == 0 implies first access)
+          if (event.getAccessCount().incrementAndGet() == 0) {
+            event.getAccessCount().incrementAndGet();
+            logEvent(event);
+          }
+        } catch (ExecutionException ex) {
+          throw new RuntimeException(ex);
+        }
+      }
+    } else {
+      List<String> kvs = new LinkedList<String>();
+      if (!Strings.isNullOrEmpty(op)) {
+        kvs.add("op=" + op);
+      }
+      if (!Strings.isNullOrEmpty(key)) {
+        kvs.add("key=" + key);
+      }
+      if (!Strings.isNullOrEmpty(user)) {
+        kvs.add("user=" + user);
+      }
+      if (kvs.size() == 0) {
+        AUDIT_LOG.info("{} {}", opStatus.toString(), extraMsg);
+      } else {
+        String join = Joiner.on(", ").join(kvs);
+        AUDIT_LOG.info("{}[{}] {}", opStatus.toString(), join, extraMsg);
+      }
+    }
   }
 
-  public static void ok(Principal user, String op, String key,
+  public void ok(Principal user, String op, String key,
       String extraMsg) {
-    op("OK", op, user, key, extraMsg);
+    op(OpStatus.OK, op, user.getName(), key, extraMsg);
+  }
+
+  public void ok(Principal user, String op, String extraMsg) {
+    op(OpStatus.OK, op, user.getName(), null, extraMsg);
   }
 
-  public static void unauthorized(Principal user, String op, String key) {
-    op("UNAUTHORIZED", op, user, key, "");
+  public void unauthorized(Principal user, String op, String key) {
+    op(OpStatus.UNAUTHORIZED, op, user.getName(), key, "");
   }
 
-  public static void error(Principal user, String method, String url,
+  public void error(Principal user, String method, String url,
       String extraMsg) {
-    AUDIT_LOG.info("Status:ERROR User:{} Method:{} URL:{} Exception:'{}'",
-        user.getName(), method, url, extraMsg);
+    op(OpStatus.ERROR, null, user.getName(), null, "Method:'" + method
+        + "' Exception:'" + extraMsg + "'");
   }
 
-  public static void unauthenticated(String remoteHost, String method,
+  public void unauthenticated(String remoteHost, String method,
       String url, String extraMsg) {
-    AUDIT_LOG.info(
-        "Status:UNAUTHENTICATED RemoteHost:{} Method:{} URL:{} ErrorMsg:'{}'",
-        remoteHost, method, url, extraMsg);
+    op(OpStatus.UNAUTHENTICATED, null, null, null, "RemoteHost:"
+        + remoteHost + " Method:" + method
+        + " URL:" + url + " ErrorMsg:'" + extraMsg + "'");
   }
 
+  private static String createCacheKey(String user, String key, String op) {
+    return user + "#" + key + "#" + op;
+  }
+
+  public void shutdown() {
+    executor.shutdownNow();
+  }
 }

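A rough usage sketch of the aggregation behaviour implemented by the new KMSAudit above. It assumes the snippet lives in the org.apache.hadoop.crypto.key.kms.server package, since the KMSAudit(long) constructor is package-private; the delay, user and key names are only illustrative, and production code obtains the shared instance through KMSWebApp.getKMSAudit():

  package org.apache.hadoop.crypto.key.kms.server;

  import java.security.Principal;

  public class KMSAuditSketch {
    public static void main(String[] args) throws Exception {
      KMSAudit audit = new KMSAudit(1000);   // 1s aggregation window, as in TestKMSAudit below
      Principal alice = new Principal() {
        @Override
        public String getName() {
          return "alice";
        }
      };
      for (int i = 0; i < 5; i++) {
        // DECRYPT_EEK is in the aggregation whitelist: the first call is logged
        // immediately, the remaining calls collapse into one accessCount entry.
        audit.ok(alice, KMS.DECRYPT_EEK, "key1", "");
      }
      audit.ok(alice, KMS.DELETE_KEY, "key1", "");  // not whitelisted, logged right away
      Thread.sleep(1500);                           // let the cache entry expire and flush
      audit.shutdown();
    }
  }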
+ 4 - 2
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAuthenticationFilter.java

@@ -115,8 +115,10 @@ public class KMSAuthenticationFilter extends AuthenticationFilter {
       if (queryString != null) {
         requestURL.append("?").append(queryString);
       }
-      KMSAudit.unauthenticated(request.getRemoteHost(), method,
-          requestURL.toString(), kmsResponse.msg);
+
+      KMSWebApp.getKMSAudit().unauthenticated(
+          request.getRemoteHost(), method, requestURL.toString(),
+          kmsResponse.msg);
     }
   }
 

+ 6 - 1
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSConfiguration.java

@@ -43,12 +43,17 @@ public class KMSConfiguration {
   // TImeout for the Current Key cache
   public static final String CURR_KEY_CACHE_TIMEOUT_KEY = CONFIG_PREFIX +
       "current.key.cache.timeout.ms";
-
+  // Delay for Audit logs that need aggregation
+  public static final String KMS_AUDIT_AGGREGATION_DELAY = CONFIG_PREFIX +
+      "aggregation.delay.ms";
+
   public static final boolean KEY_CACHE_ENABLE_DEFAULT = true;
   // 10 mins
   public static final long KEY_CACHE_TIMEOUT_DEFAULT = 10 * 60 * 1000;
   // 30 secs
   public static final long CURR_KEY_CACHE_TIMEOUT_DEFAULT = 30 * 1000;
+  // 10 secs
+  public static final long KMS_AUDIT_AGGREGATION_DELAY_DEFAULT = 10000;
 
   static Configuration getConfiguration(boolean loadHadoopDefaults,
       String ... resources) {

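For reference, a hedged sketch of how the new aggregation-delay setting is resolved. It mirrors the wiring added to KMSWebApp later in this change; the stand-alone Configuration instance here is only illustrative (the KMS server reads its own configuration, e.g. kms-site.xml):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.crypto.key.kms.server.KMSConfiguration;

  public class AuditDelaySketch {
    public static void main(String[] args) {
      Configuration conf = new Configuration(false);
      // Falls back to KMS_AUDIT_AGGREGATION_DELAY_DEFAULT (10000 ms) when
      // hadoop.kms.aggregation.delay.ms is not set.
      long delayMs = conf.getLong(
          KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY,
          KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY_DEFAULT);
      System.out.println("KMS audit aggregation delay: " + delayMs + "ms");
    }
  }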
+ 9 - 1
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSExceptionsProvider.java

@@ -20,9 +20,11 @@ package org.apache.hadoop.crypto.key.kms.server;
 import org.apache.hadoop.classification.InterfaceAudience;
 
 import com.sun.jersey.api.container.ContainerException;
+
 import org.apache.hadoop.crypto.key.kms.KMSRESTConstants;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -30,6 +32,7 @@ import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.ext.ExceptionMapper;
 import javax.ws.rs.ext.Provider;
+
 import java.io.IOException;
 import java.security.Principal;
 import java.util.LinkedHashMap;
@@ -83,6 +86,10 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
       status = Response.Status.FORBIDDEN;
       // we don't audit here because we did it already when checking access
       doAudit = false;
+    } else if (throwable instanceof AuthorizationException) {
+      status = Response.Status.UNAUTHORIZED;
+      // we don't audit here because we did it already when checking access
+      doAudit = false;
     } else if (throwable instanceof AccessControlException) {
       status = Response.Status.FORBIDDEN;
     } else if (exception instanceof IOException) {
@@ -95,7 +102,8 @@ public class KMSExceptionsProvider implements ExceptionMapper<Exception> {
       status = Response.Status.INTERNAL_SERVER_ERROR;
     }
     if (doAudit) {
-      KMSAudit.error(KMSMDCFilter.getPrincipal(), KMSMDCFilter.getMethod(),
+      KMSWebApp.getKMSAudit().error(KMSMDCFilter.getPrincipal(),
+          KMSMDCFilter.getMethod(),
           KMSMDCFilter.getURL(), getOneLineMessage(exception));
     }
     return createResponse(status, throwable);

+ 11 - 0
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java

@@ -76,6 +76,7 @@ public class KMSWebApp implements ServletContextListener {
   private static Meter decryptEEKCallsMeter;
   private static Meter generateEEKCallsMeter;
   private static Meter invalidCallsMeter;
+  private static KMSAudit kmsAudit;
   private static KeyProviderCryptoExtension keyProviderCryptoExtension;
 
   static {
@@ -144,6 +145,11 @@ public class KMSWebApp implements ServletContextListener {
       unauthenticatedCallsMeter = metricRegistry.register(
           UNAUTHENTICATED_CALLS_METER, new Meter());
 
+      kmsAudit =
+          new KMSAudit(kmsConf.getLong(
+              KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY,
+              KMSConfiguration.KMS_AUDIT_AGGREGATION_DELAY_DEFAULT));
+
       // this is required for the the JMXJsonServlet to work properly.
       // the JMXJsonServlet is behind the authentication filter,
       // thus the '*' ACL.
@@ -199,6 +205,7 @@ public class KMSWebApp implements ServletContextListener {
 
   @Override
   public void contextDestroyed(ServletContextEvent sce) {
+    kmsAudit.shutdown();
     acls.stopReloader();
     jmxReporter.stop();
     jmxReporter.close();
@@ -245,4 +252,8 @@ public class KMSWebApp implements ServletContextListener {
   public static KeyProviderCryptoExtension getKeyProvider() {
     return keyProviderCryptoExtension;
   }
+
+  public static KMSAudit getKMSAudit() {
+    return kmsAudit;
+  }
 }

+ 19 - 0
hadoop-common-project/hadoop-kms/src/site/apt/index.apt.vm

@@ -104,6 +104,25 @@ Hadoop Key Management Server (KMS) - Documentation Sets ${project.version}
   </property>
 +---+
 
+** KMS Aggregated Audit logs
+
+  Audit logs are aggregated for API accesses to the GET_KEY_VERSION,
+  GET_CURRENT_KEY, DECRYPT_EEK and GENERATE_EEK operations.
+
+  Entries are grouped by a combined (user, key, operation) key for a
+  configurable aggregation interval, after which the number of accesses made
+  by that user to the given key via that end-point is flushed to the audit
+  log.
+
+  The aggregation interval is configured via the following property:
+
++---+
+  <property>
+    <name>hadoop.kms.aggregation.delay.ms</name>
+    <value>10000</value>
+  </property>
++---+
+
 ** Start/Stop the KMS
 
   To start/stop KMS use KMS's bin/kms.sh script. For example:

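To make the aggregated format concrete: with the logEvent pattern added to KMSAudit in this change, a flushed entry looks roughly like the line below (user, key, access count and interval are sample values taken from TestKMSAudit further down; the interval varies at runtime):

  OK[op=DECRYPT_EEK, key=k1, user=luser, accessCount=6, interval=1502ms] testmsg

Operations outside the aggregation whitelist are logged immediately in the shorter form OK[op=DELETE_KEY, key=k1, user=luser] testmsg, and rejected calls appear as UNAUTHORIZED[op=..., key=..., user=...].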
+ 134 - 0
hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMSAudit.java

@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.crypto.key.kms.server;
+
+import java.io.ByteArrayOutputStream;
+import java.io.FilterOutputStream;
+import java.io.OutputStream;
+import java.io.PrintStream;
+import java.security.Principal;
+
+import org.apache.log4j.LogManager;
+import org.apache.log4j.PropertyConfigurator;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+public class TestKMSAudit {
+
+  private PrintStream originalOut;
+  private ByteArrayOutputStream memOut;
+  private FilterOut filterOut;
+  private PrintStream capturedOut;
+  
+  private KMSAudit kmsAudit; 
+
+  private static class FilterOut extends FilterOutputStream {
+    public FilterOut(OutputStream out) {
+      super(out);
+    }
+
+    public void setOutputStream(OutputStream out) {
+      this.out = out;
+    }
+  }
+
+  @Before
+  public void setUp() {
+    originalOut = System.err;
+    memOut = new ByteArrayOutputStream();
+    filterOut = new FilterOut(memOut);
+    capturedOut = new PrintStream(filterOut);
+    System.setErr(capturedOut);
+    PropertyConfigurator.configure(Thread.currentThread().
+        getContextClassLoader()
+        .getResourceAsStream("log4j-kmsaudit.properties"));
+    this.kmsAudit = new KMSAudit(1000);
+  }
+
+  @After
+  public void cleanUp() {
+    System.setErr(originalOut);
+    LogManager.resetConfiguration();
+    kmsAudit.shutdown();
+  }
+
+  private String getAndResetLogOutput() {
+    capturedOut.flush();
+    String logOutput = new String(memOut.toByteArray());
+    memOut = new ByteArrayOutputStream();
+    filterOut.setOutputStream(memOut);
+    return logOutput;
+  }
+
+  @Test
+  public void testAggregation() throws Exception {
+    Principal luser = Mockito.mock(Principal.class);
+    Mockito.when(luser.getName()).thenReturn("luser");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DELETE_KEY, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.ROLL_NEW_VERSION, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    Thread.sleep(1500);
+    kmsAudit.ok(luser, KMS.DECRYPT_EEK, "k1", "testmsg");
+    Thread.sleep(1500);
+    String out = getAndResetLogOutput();
+    System.out.println(out);
+    Assert.assertTrue(
+        out.matches(
+            "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"
+            // Not aggregated !!
+            + "OK\\[op=DELETE_KEY, key=k1, user=luser\\] testmsg"
+            + "OK\\[op=ROLL_NEW_VERSION, key=k1, user=luser\\] testmsg"
+            // Aggregated
+            + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=6, interval=[^m]{1,4}ms\\] testmsg"
+            + "OK\\[op=DECRYPT_EEK, key=k1, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"));
+  }
+
+  @Test
+  public void testAggregationUnauth() throws Exception {
+    Principal luser = Mockito.mock(Principal.class);
+    Mockito.when(luser.getName()).thenReturn("luser");
+    kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k2");
+    Thread.sleep(1000);
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    kmsAudit.unauthorized(luser, KMS.GENERATE_EEK, "k3");
+    kmsAudit.ok(luser, KMS.GENERATE_EEK, "k3", "testmsg");
+    Thread.sleep(2000);
+    String out = getAndResetLogOutput();
+    System.out.println(out);
+    Assert.assertTrue(
+        out.matches(
+            "UNAUTHORIZED\\[op=GENERATE_EEK, key=k2, user=luser\\] "
+            + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"
+            + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=5, interval=[^m]{1,4}ms\\] testmsg"
+            + "UNAUTHORIZED\\[op=GENERATE_EEK, key=k3, user=luser\\] "
+            + "OK\\[op=GENERATE_EEK, key=k3, user=luser, accessCount=1, interval=[^m]{1,4}ms\\] testmsg"));
+  }
+
+}

+ 25 - 0
hadoop-common-project/hadoop-kms/src/test/resources/log4j-kmsaudit.properties

@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# LOG Appender
+log4j.appender.kms-audit=org.apache.log4j.ConsoleAppender
+log4j.appender.kms-audit.Target=System.err
+log4j.appender.kms-audit.layout=org.apache.log4j.PatternLayout
+log4j.appender.kms-audit.layout.ConversionPattern=%m
+
+log4j.rootLogger=INFO, kms-audit

File diff too large to display
+ 215 - 166
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java


+ 587 - 3
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestRpcProgramNfs3.java

@@ -18,19 +18,603 @@
 package org.apache.hadoop.hdfs.nfs.nfs3;
 
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import org.jboss.netty.channel.Channel;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
 
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
+import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
-import org.junit.Assert;
-import org.junit.Test;
+import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
+import org.apache.hadoop.nfs.nfs3.Nfs3Status;
+import org.apache.hadoop.nfs.nfs3.request.LOOKUP3Request;
+import org.apache.hadoop.nfs.nfs3.request.READ3Request;
+import org.apache.hadoop.nfs.nfs3.request.WRITE3Request;
+import org.apache.hadoop.nfs.nfs3.response.ACCESS3Response;
+import org.apache.hadoop.nfs.nfs3.response.COMMIT3Response;
+import org.apache.hadoop.nfs.nfs3.response.CREATE3Response;
+import org.apache.hadoop.nfs.nfs3.response.FSSTAT3Response;
+import org.apache.hadoop.nfs.nfs3.response.FSINFO3Response;
+import org.apache.hadoop.nfs.nfs3.response.GETATTR3Response;
+import org.apache.hadoop.nfs.nfs3.response.LOOKUP3Response;
+import org.apache.hadoop.nfs.nfs3.response.PATHCONF3Response;
+import org.apache.hadoop.nfs.nfs3.response.READ3Response;
+import org.apache.hadoop.nfs.nfs3.response.REMOVE3Response;
+import org.apache.hadoop.nfs.nfs3.response.RMDIR3Response;
+import org.apache.hadoop.nfs.nfs3.response.RENAME3Response;
+import org.apache.hadoop.nfs.nfs3.response.READDIR3Response;
+import org.apache.hadoop.nfs.nfs3.response.READDIRPLUS3Response;
+import org.apache.hadoop.nfs.nfs3.response.READLINK3Response;
+import org.apache.hadoop.nfs.nfs3.response.SETATTR3Response;
+import org.apache.hadoop.nfs.nfs3.response.SYMLINK3Response;
+import org.apache.hadoop.nfs.nfs3.response.WRITE3Response;
+import org.apache.hadoop.nfs.nfs3.request.SetAttr3;
+import org.apache.hadoop.oncrpc.XDR;
+import org.apache.hadoop.oncrpc.security.SecurityHandler;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 
 
 /**
  * Tests for {@link RpcProgramNfs3}
  */
 public class TestRpcProgramNfs3 {
+  static DistributedFileSystem hdfs;
+  static MiniDFSCluster cluster = null;
+  static NfsConfiguration config = new NfsConfiguration();
+  static NameNode nn;
+  static Nfs3 nfs;
+  static RpcProgramNfs3 nfsd;
+  static SecurityHandler securityHandler;
+  static SecurityHandler securityHandlerUnpriviledged;
+  static String testdir = "/tmp";
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    String currentUser = System.getProperty("user.name");
+
+    config.set("fs.permissions.umask-mode", "u=rwx,g=,o=");
+    config.set(DefaultImpersonationProvider.getTestProvider()
+        .getProxySuperuserGroupConfKey(currentUser), "*");
+    config.set(DefaultImpersonationProvider.getTestProvider()
+        .getProxySuperuserIpConfKey(currentUser), "*");
+    ProxyUsers.refreshSuperUserGroupsConfiguration(config);
+
+    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
+    cluster.waitActive();
+    hdfs = cluster.getFileSystem();
+    nn = cluster.getNameNode();
+
+    // Use ephemeral ports in case tests are running in parallel
+    config.setInt("nfs3.mountd.port", 0);
+    config.setInt("nfs3.server.port", 0);
+
+    // Start NFS with allowed.hosts set to "* rw"
+    config.set("dfs.nfs.exports.allowed.hosts", "* rw");
+    nfs = new Nfs3(config);
+    nfs.startServiceInternal(false);
+    nfsd = (RpcProgramNfs3) nfs.getRpcProgram();
+
+
+    // Mock SecurityHandler which returns system user.name
+    securityHandler = Mockito.mock(SecurityHandler.class);
+    Mockito.when(securityHandler.getUser()).thenReturn(currentUser);
+
+    // Mock SecurityHandler which returns a dummy username "harry"
+    securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
+    Mockito.when(securityHandlerUnpriviledged.getUser()).thenReturn("harry");
+  }
+
+  @AfterClass
+  public static void shutdown() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Before
+  public void createFiles() throws IllegalArgumentException, IOException {
+    hdfs.delete(new Path(testdir), true);
+    hdfs.mkdirs(new Path(testdir));
+    hdfs.mkdirs(new Path(testdir + "/foo"));
+    DFSTestUtil.createFile(hdfs, new Path(testdir + "/bar"), 0, (short) 1, 0);
+  }
+
+  @Test(timeout = 60000)
+  public void testGetattr() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    GETATTR3Response response1 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    GETATTR3Response response2 = nfsd.getattr(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testSetattr() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("bar");
+    SetAttr3 symAttr = new SetAttr3();
+    symAttr.serialize(xdr_req);
+    xdr_req.writeBoolean(false);
+
+    // Attempt by an unpriviledged user should fail.
+    SETATTR3Response response1 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    SETATTR3Response response2 = nfsd.setattr(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testLookup() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    LOOKUP3Request lookupReq = new LOOKUP3Request(handle, "bar");
+    XDR xdr_req = new XDR();
+    lookupReq.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    LOOKUP3Response response1 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    LOOKUP3Response response2 = nfsd.lookup(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testAccess() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    ACCESS3Response response1 = nfsd.access(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    ACCESS3Response response2 = nfsd.access(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testReadlink() throws Exception {
+    // Create a symlink first.
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("fubar");
+    SetAttr3 symAttr = new SetAttr3();
+    symAttr.serialize(xdr_req);
+    xdr_req.writeString("bar");
+
+    SYMLINK3Response response = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response.getStatus());
+
+    // Now perform readlink operations.
+    FileHandle handle2 = response.getObjFileHandle();
+    XDR xdr_req2 = new XDR();
+    handle2.serialize(xdr_req2);
+
+    // Attempt by an unpriviledged user should fail.
+    READLINK3Response response1 = nfsd.readlink(xdr_req2.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    READLINK3Response response2 = nfsd.readlink(xdr_req2.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testRead() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+
+    READ3Request readReq = new READ3Request(handle, 0, 5);
+    XDR xdr_req = new XDR();
+    readReq.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    /* Hits HDFS-6582. It needs to be fixed first.
+    READ3Response response1 = nfsd.read(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+    */
+
+    // Attempt by a priviledged user should pass.
+    READ3Response response2 = nfsd.read(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testWrite() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+
+    byte[] buffer = new byte[10];
+    for (int i = 0; i < 10; i++) {
+      buffer[i] = (byte) i;
+    }
+
+    WRITE3Request writeReq = new WRITE3Request(handle, 0, 10,
+        WriteStableHow.DATA_SYNC, ByteBuffer.wrap(buffer));
+    XDR xdr_req = new XDR();
+    writeReq.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    WRITE3Response response1 = nfsd.write(xdr_req.asReadOnlyWrap(),
+        null, 1, securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    WRITE3Response response2 = nfsd.write(xdr_req.asReadOnlyWrap(),
+        null, 1, securityHandler,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect response:", null, response2);
+  }
+
+  @Test(timeout = 60000)
+  public void testCreate() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("fubar");
+    xdr_req.writeInt(Nfs3Constant.CREATE_UNCHECKED);
+    SetAttr3 symAttr = new SetAttr3();
+    symAttr.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    CREATE3Response response2 = nfsd.create(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testMkdir() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("fubar");
+    SetAttr3 symAttr = new SetAttr3();
+    symAttr.serialize(xdr_req);
+    xdr_req.writeString("bar");
+
+    // Attempt to remove by an unpriviledged user should fail.
+    SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt to remove by a priviledged user should pass.
+    SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testSymlink() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("fubar");
+    SetAttr3 symAttr = new SetAttr3();
+    symAttr.serialize(xdr_req);
+    xdr_req.writeString("bar");
+
+    // Attempt by an unpriviledged user should fail.
+    SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testRemove() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("bar");
+
+    // Attempt by an unpriviledged user should fail.
+    REMOVE3Response response1 = nfsd.remove(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    REMOVE3Response response2 = nfsd.remove(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testRmdir() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("foo");
+
+    // Attempt by an unpriviledged user should fail.
+    RMDIR3Response response1 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    RMDIR3Response response2 = nfsd.rmdir(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testRename() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    XDR xdr_req = new XDR();
+    FileHandle handle = new FileHandle(dirId);
+    handle.serialize(xdr_req);
+    xdr_req.writeString("bar");
+    handle.serialize(xdr_req);
+    xdr_req.writeString("fubar");
+
+    // Attempt by an unpriviledged user should fail.
+    RENAME3Response response1 = nfsd.rename(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    RENAME3Response response2 = nfsd.rename(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testReaddir() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+    xdr_req.writeLongAsHyper(0);
+    xdr_req.writeLongAsHyper(0);
+    xdr_req.writeInt(100);
+
+    // Attempt by an unpriviledged user should fail.
+    READDIR3Response response1 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    READDIR3Response response2 = nfsd.readdir(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testReaddirplus() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+    xdr_req.writeLongAsHyper(0);
+    xdr_req.writeLongAsHyper(0);
+    xdr_req.writeInt(3);
+    xdr_req.writeInt(2);
+
+    // Attempt by an unpriviledged user should fail.
+    READDIRPLUS3Response response1 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    READDIRPLUS3Response response2 = nfsd.readdirplus(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testFsstat() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    FSSTAT3Response response1 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a priviledged user should pass.
+    FSSTAT3Response response2 = nfsd.fsstat(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testFsinfo() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+
+    // Attempt by an unpriviledged user should fail.
+    FSINFO3Response response1 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a privileged user should pass.
+    FSINFO3Response response2 = nfsd.fsinfo(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testPathconf() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+
+    // Attempt by an unprivileged user should fail.
+    PATHCONF3Response response1 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
+        securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a privileged user should pass.
+    PATHCONF3Response response2 = nfsd.pathconf(xdr_req.asReadOnlyWrap(),
+        securityHandler, new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
+        response2.getStatus());
+  }
+
+  @Test(timeout = 60000)
+  public void testCommit() throws Exception {
+    HdfsFileStatus status = nn.getRpcServer().getFileInfo("/tmp/bar");
+    long dirId = status.getFileId();
+    FileHandle handle = new FileHandle(dirId);
+    XDR xdr_req = new XDR();
+    handle.serialize(xdr_req);
+    xdr_req.writeLongAsHyper(0);
+    xdr_req.writeInt(5);
+
+    Channel ch = Mockito.mock(Channel.class);
+
+    // Attempt by an unprivileged user should fail.
+    COMMIT3Response response1 = nfsd.commit(xdr_req.asReadOnlyWrap(),
+        ch, 1, securityHandlerUnpriviledged,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
+        response1.getStatus());
+
+    // Attempt by a privileged user should pass.
+    COMMIT3Response response2 = nfsd.commit(xdr_req.asReadOnlyWrap(),
+        ch, 1, securityHandler,
+        new InetSocketAddress("localhost", 1234));
+    assertEquals("Incorrect COMMIT3Response:", null, response2);
+  }
+
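The handler tests above all follow the same pattern: serialize an XDR request for the target file handle, call the handler once with an unprivileged SecurityHandler (expecting NFS3ERR_ACCES) and once with a privileged one (expecting NFS3_OK). The two handlers come from the test fixture, which is not part of this hunk; the sketch below is only an assumption of how such a fixture might be mocked, taking for granted that SecurityHandler exposes getUser() and that the privileged user is the one running the MiniDFSCluster.

    import org.apache.hadoop.oncrpc.security.SecurityHandler;
    import org.junit.Before;
    import org.mockito.Mockito;

    // Hypothetical fixture sketch -- not the actual setup shipped with this patch.
    private SecurityHandler securityHandler;
    private SecurityHandler securityHandlerUnpriviledged;

    @Before
    public void setUpSecurityHandlers() {
      securityHandler = Mockito.mock(SecurityHandler.class);
      // The user running the test owns the mini cluster, so its calls pass the checks.
      Mockito.when(securityHandler.getUser())
          .thenReturn(System.getProperty("user.name"));

      securityHandlerUnpriviledged = Mockito.mock(SecurityHandler.class);
      // Any other user name is rejected by the permission checks in the handlers.
      Mockito.when(securityHandlerUnpriviledged.getUser())
          .thenReturn("regularuser");
    }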
  @Test(timeout=1000)
  public void testIdempotent() {
    Object[][] procedures = {

+ 52 - 3
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -130,6 +130,9 @@ Trunk (Unreleased)
    HDFS-6609. Use DirectorySnapshottableFeature to represent a snapshottable
    directory. (Jing Zhao via wheat9)
 
+    HDFS-6482. Use block ID-based block layout on datanodes (James Thomas via
+    Colin Patrick McCabe)
+
  OPTIMIZATIONS

  BUG FIXES
@@ -184,9 +187,6 @@ Trunk (Unreleased)

    HDFS-3549. Fix dist tar build fails in hadoop-hdfs-raid project. (Jason Lowe via daryn)
 
-    HDFS-3482. hdfs balancer throws ArrayIndexOutOfBoundsException 
-    if option is specified without values. ( Madhukara Phatak via umamahesh) 
-
    HDFS-3614. Revert unused MiniDFSCluster constructor from HDFS-3049.
    (acmurthy via eli)
 
@@ -332,6 +332,31 @@ Release 2.6.0 - UNRELEASED
    HDFS-6778. The extended attributes javadoc should simply refer to the
    user docs. (clamb via wang)
 
+    HDFS-6570. add api that enables checking if a user has certain permissions on
+    a file. (Jitendra Pandey via cnauroth)
+
+    HDFS-6441. Add ability to exclude/include specific datanodes while
+    balancing. (Benoy Antony and Yu Li via Arpit Agarwal)
+
+    HDFS-6685. Balancer should preserve storage type of replicas.  (szetszwo)
+
+    HDFS-6798. Add test case for incorrect data node condition during
+    balancing. (Benoy Antony via Arpit Agarwal)
+
+    HDFS-6796. Improve the argument check during balancer command line parsing.
+    (Benoy Antony via szetszwo)
+
+    HDFS-6794. Update BlockManager methods to use DatanodeStorageInfo
+    where possible (Arpit Agarwal)
+
+    HDFS-6802. Some tests in TestDFSClientFailover are missing @Test
+    annotation. (Akira Ajisaka via wang)
+
+    HDFS-6788. Improve synchronization in BPOfferService with read write lock.
+    (Yongjun Zhang via wang)
+
+    HDFS-6787. Remove duplicate code in FSDirectory#unprotectedConcat. (Yi Liu via umamahesh)
+
  OPTIMIZATIONS

    HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -394,6 +419,27 @@ Release 2.6.0 - UNRELEASED
    HDFS-6749. FSNamesystem methods should call resolvePath.
    (Charles Lamb via cnauroth)
 
+    HDFS-4629. Using com.sun.org.apache.xml.internal.serialize.* in
+    XmlEditsVisitor.java is JVM vendor specific. Breaks IBM JAVA.
+    (Amir Sanjar via stevel)
+
+    HDFS-3482. hdfs balancer throws ArrayIndexOutOfBoundsException 
+    if option is specified without values. ( Madhukara Phatak via umamahesh) 
+
+    HDFS-6797. DataNode logs wrong layoutversion during upgrade. (Benoy Antony
+    via Arpit Agarwal)
+
+    HDFS-6810. StorageReport array is initialized with wrong size in
+    DatanodeDescriptor#getStorageReports. (szetszwo via Arpit Agarwal)
+
+    HDFS-5723. Append failed FINALIZED replica should not be accepted as valid
+    when that block is underconstruction (vinayakumarb)
+
+    HDFS-5185. DN fails to startup if one of the data dir is full. (vinayakumarb)
+
+    HDFS-6451. NFS should not return NFS3ERR_IO for AccessControlException 
+    (Abhiraj Butala via brandonli)
+
Release 2.5.0 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -949,6 +995,9 @@ Release 2.5.0 - UNRELEASED
    HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config
    (brandonli)
 
+    HDFS-6768. Fix a few unit tests that use hard-coded port numbers. (Arpit
+    Agarwal)
+
  BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS

    HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -176,6 +176,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
      <artifactId>netty</artifactId>
      <scope>compile</scope>
    </dependency>
+    <dependency>
+      <groupId>xerces</groupId>
+      <artifactId>xercesImpl</artifactId>
+      <scope>compile</scope>
+    </dependency>
  </dependencies>

  <build>

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CryptoCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.hdfs.CorruptFileBlockIterator;
@@ -456,6 +457,11 @@ public class Hdfs extends AbstractFileSystem {
    dfs.removeXAttr(getUriPath(path), name);
  }
 
+  @Override
+  public void access(Path path, final FsAction mode) throws IOException {
+    dfs.checkAccess(getUriPath(path), mode);
+  }
+
  /**
   * Renew an existing delegation token.
   * 

+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -132,6 +132,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.net.Peer;
@@ -2951,6 +2952,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    }
  }
 
+  public void checkAccess(String src, FsAction mode) throws IOException {
+    checkOpen();
+    try {
+      namenode.checkAccess(src, mode);
+    } catch (RemoteException re) {
+      throw re.unwrapRemoteException(AccessControlException.class,
+          FileNotFoundException.class,
+          UnresolvedPathException.class);
+    }
+  }
+
  @Override // RemotePeerFactory
  public Peer newConnectedPeer(InetSocketAddress addr,
      Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -381,8 +381,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String  DFS_DATANODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_DATANODE_HTTP_DEFAULT_PORT;
  public static final String  DFS_DATANODE_MAX_RECEIVER_THREADS_KEY = "dfs.datanode.max.transfer.threads";
  public static final int     DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT = 4096;
-  public static final String  DFS_DATANODE_NUMBLOCKS_KEY = "dfs.datanode.numblocks";
-  public static final int     DFS_DATANODE_NUMBLOCKS_DEFAULT = 64;
  public static final String  DFS_DATANODE_SCAN_PERIOD_HOURS_KEY = "dfs.datanode.scan.period.hours";
  public static final int     DFS_DATANODE_SCAN_PERIOD_HOURS_DEFAULT = 0;
  public static final String  DFS_DATANODE_TRANSFERTO_ALLOWED_KEY = "dfs.datanode.transferTo.allowed";
@@ -668,4 +666,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_KEY =
     "dfs.datanode.slow.io.warning.threshold.ms";
   public static final long DFS_DATANODE_SLOW_IO_WARNING_THRESHOLD_DEFAULT = 300;
+
+   public static final String DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY =
+       "dfs.datanode.block.id.layout.upgrade.threads";
+   public static final int DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS = 12;
 }
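The two constants added above back the new dfs.datanode.block.id.layout.upgrade.threads setting introduced for the block ID-based layout upgrade (HDFS-6482). A short sketch of reading it through the standard Configuration API; the wrapper class is illustrative only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class LayoutUpgradeConfigExample {
      static int upgradeThreads(Configuration conf) {
        // Falls back to the compiled-in default of 12 when the key is unset.
        return conf.getInt(
            DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY,
            DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS);
      }
    }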

+ 20 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -59,6 +59,7 @@ import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -1913,4 +1914,23 @@ public class DistributedFileSystem extends FileSystem {
      }
    }.resolve(this, absF);
  }
+
+  @Override
+  public void access(Path path, final FsAction mode) throws IOException {
+    final Path absF = fixRelativePart(path);
+    new FileSystemLinkResolver<Void>() {
+      @Override
+      public Void doCall(final Path p) throws IOException {
+        dfs.checkAccess(getPathName(p), mode);
+        return null;
+      }
+
+      @Override
+      public Void next(final FileSystem fs, final Path p)
+          throws IOException {
+        fs.access(p, mode);
+        return null;
+      }
+    }.resolve(this, absF);
+  }
 }

+ 10 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/StorageType.java

@@ -18,6 +18,9 @@

 package org.apache.hadoop.hdfs;
 
+import java.util.Arrays;
+import java.util.List;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
@@ -32,4 +35,11 @@ public enum StorageType {
  SSD;

  public static final StorageType DEFAULT = DISK;
+  public static final StorageType[] EMPTY_ARRAY = {};
+  
+  private static final StorageType[] VALUES = values();
+  
+  public static List<StorageType> asList() {
+    return Arrays.asList(VALUES);
+  }
 }

+ 11 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java

@@ -50,6 +50,9 @@ public class Block implements Writable, Comparable<Block> {
  public static final Pattern metaFilePattern = Pattern
      .compile(BLOCK_FILE_PREFIX + "(-??\\d++)_(\\d++)\\" + METADATA_EXTENSION
          + "$");
+  public static final Pattern metaOrBlockFilePattern = Pattern
+      .compile(BLOCK_FILE_PREFIX + "(-??\\d++)(_(\\d++)\\" + METADATA_EXTENSION
+          + ")?$");
 
 
  public static boolean isBlockFilename(File f) {
    String name = f.getName();
@@ -65,6 +68,11 @@ public class Block implements Writable, Comparable<Block> {
    return metaFilePattern.matcher(name).matches();
  }
 
+  public static File metaToBlockFile(File metaFile) {
+    return new File(metaFile.getParent(), metaFile.getName().substring(
+        0, metaFile.getName().lastIndexOf('_')));
+  }
+
  /**
   * Get generation stamp from the name of the metafile name
   */
@@ -75,10 +83,10 @@ public class Block implements Writable, Comparable<Block> {
  }

  /**
-   * Get the blockId from the name of the metafile name
+   * Get the blockId from the name of the meta or block file
   */
-  public static long getBlockId(String metaFile) {
-    Matcher m = metaFilePattern.matcher(metaFile);
+  public static long getBlockId(String metaOrBlockFile) {
+    Matcher m = metaOrBlockFilePattern.matcher(metaOrBlockFile);
    return m.matches() ? Long.parseLong(m.group(1)) : 0;
  }
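With the relaxed metaOrBlockFilePattern, getBlockId now extracts the block ID from either a block file name or its metadata file name, instead of meta file names only. A small sketch with made-up file names (the IDs below are purely illustrative):

    import org.apache.hadoop.hdfs.protocol.Block;

    public class BlockIdExample {
      public static void main(String[] args) {
        long fromBlockFile = Block.getBlockId("blk_1073741825");           // block file
        long fromMetaFile  = Block.getBlockId("blk_1073741825_1001.meta"); // meta file
        System.out.println(fromBlockFile == fromMetaFile);   // true: both yield 1073741825
        System.out.println(Block.getBlockId("not-a-block")); // 0 for a non-matching name
      }
    }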
 
 

+ 19 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
@@ -1346,4 +1347,22 @@ public interface ClientProtocol {
   */
  @AtMostOnce
  public void removeXAttr(String src, XAttr xAttr) throws IOException;
+
+  /**
+   * Checks if the user can access a path.  The mode specifies which access
+   * checks to perform.  If the requested permissions are granted, then the
+   * method returns normally.  If access is denied, then the method throws an
+   * {@link AccessControlException}.
+   * In general, applications should avoid using this method, due to the risk of
+   * time-of-check/time-of-use race conditions.  The permissions on a file may
+   * change immediately after the access call returns.
+   *
+   * @param path Path to check
+   * @param mode type of access to check
+   * @throws AccessControlException if access is denied
+   * @throws FileNotFoundException if the path does not exist
+   * @throws IOException see specific implementation
+   */
+  @Idempotent
+  public void checkAccess(String path, FsAction mode) throws IOException;
 }
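The javadoc above defines the contract that the new client-side entry points (DFSClient#checkAccess, DistributedFileSystem#access and Hdfs#access earlier in this change) delegate to. A minimal caller-side sketch, assuming an already configured FileSystem; the helper class and its path handling are illustrative, not part of the patch.

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsAction;
    import org.apache.hadoop.security.AccessControlException;

    public class AccessCheckExample {
      // Returns whether the current user may read the path. Because of the
      // time-of-check/time-of-use caveat in the javadoc, callers must still be
      // prepared for AccessControlException on the actual read.
      public static boolean canRead(FileSystem fs, Path path) throws IOException {
        try {
          fs.access(path, FsAction.READ);   // API introduced by this change
          return true;
        } catch (AccessControlException denied) {
          return false;
        } catch (FileNotFoundException missing) {
          return false;
        }
      }
    }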

+ 16 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -175,6 +175,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesResponseProto;
@@ -325,6 +327,9 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
  private static final RemoveXAttrResponseProto
    VOID_REMOVEXATTR_RESPONSE = RemoveXAttrResponseProto.getDefaultInstance();
 
+  private static final CheckAccessResponseProto
+    VOID_CHECKACCESS_RESPONSE = CheckAccessResponseProto.getDefaultInstance();
+
  /**
   * Constructor
   * 
@@ -1375,4 +1380,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
    }
    return VOID_REMOVEXATTR_RESPONSE;
  }
+
+  @Override
+  public CheckAccessResponseProto checkAccess(RpcController controller,
+     CheckAccessRequestProto req) throws ServiceException {
+    try {
+      server.checkAccess(req.getPath(), PBHelper.convert(req.getMode()));
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return VOID_CHECKACCESS_RESPONSE;
+  }
 }

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -147,6 +148,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetSaf
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SetTimesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdateBlockForPipelineRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CheckAccessRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.CreateEncryptionZoneRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.ListEncryptionZonesRequestProto;
@@ -1400,4 +1402,15 @@ public class ClientNamenodeProtocolTranslatorPB implements
      throw ProtobufHelper.getRemoteException(e);
    }
  }
+
+  @Override
+  public void checkAccess(String path, FsAction mode) throws IOException {
+    CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder()
+        .setPath(path).setMode(PBHelper.convert(mode)).build();
+    try {
+      rpcProxy.checkAccess(null, req);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }

+ 8 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -357,15 +357,19 @@ public class PBHelper {
    return BlockWithLocationsProto.newBuilder()
        .setBlock(convert(blk.getBlock()))
        .addAllDatanodeUuids(Arrays.asList(blk.getDatanodeUuids()))
-        .addAllStorageUuids(Arrays.asList(blk.getStorageIDs())).build();
+        .addAllStorageUuids(Arrays.asList(blk.getStorageIDs()))
+        .addAllStorageTypes(convertStorageTypes(blk.getStorageTypes()))
+        .build();
  }
 
  public static BlockWithLocations convert(BlockWithLocationsProto b) {
    final List<String> datanodeUuids = b.getDatanodeUuidsList();
    final List<String> storageUuids = b.getStorageUuidsList();
+    final List<StorageTypeProto> storageTypes = b.getStorageTypesList();
    return new BlockWithLocations(convert(b.getBlock()),
        datanodeUuids.toArray(new String[datanodeUuids.size()]),
-        storageUuids.toArray(new String[storageUuids.size()]));
+        storageUuids.toArray(new String[storageUuids.size()]),
+        convertStorageTypes(storageTypes, storageUuids.size()));
  }

  public static BlocksWithLocationsProto convert(BlocksWithLocations blks) {
@@ -2122,11 +2126,11 @@ public class PBHelper {
    return castEnum(v, XATTR_NAMESPACE_VALUES);
  }
 
-  private static FsActionProto convert(FsAction v) {
+  public static FsActionProto convert(FsAction v) {
    return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
  }
 
-  private static FsAction convert(FsActionProto v) {
+  public static FsAction convert(FsActionProto v) {
    return castEnum(v, FSACTION_VALUES);
  }
 

File diff too large to display
+ 368 - 249
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java


+ 61 - 23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancingPolicy.java

@@ -18,7 +18,11 @@
 package org.apache.hadoop.hdfs.server.balancer;
 package org.apache.hadoop.hdfs.server.balancer;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.hdfs.util.EnumCounters;
+import org.apache.hadoop.hdfs.util.EnumDoubles;
 
 
 /**
 /**
  * Balancing policy.
  * Balancing policy.
@@ -28,31 +32,43 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
  */
  */
 @InterfaceAudience.Private
 @InterfaceAudience.Private
 abstract class BalancingPolicy {
 abstract class BalancingPolicy {
-  long totalCapacity;
-  long totalUsedSpace;
-  private double avgUtilization;
+  final EnumCounters<StorageType> totalCapacities
+      = new EnumCounters<StorageType>(StorageType.class);
+  final EnumCounters<StorageType> totalUsedSpaces
+      = new EnumCounters<StorageType>(StorageType.class);
+  final EnumDoubles<StorageType> avgUtilizations
+      = new EnumDoubles<StorageType>(StorageType.class);
 
 
   void reset() {
   void reset() {
-    totalCapacity = 0L;
-    totalUsedSpace = 0L;
-    avgUtilization = 0.0;
+    totalCapacities.reset();
+    totalUsedSpaces.reset();
+    avgUtilizations.reset();
   }
   }
 
 
   /** Get the policy name. */
   /** Get the policy name. */
   abstract String getName();
   abstract String getName();
 
 
   /** Accumulate used space and capacity. */
   /** Accumulate used space and capacity. */
-  abstract void accumulateSpaces(DatanodeInfo d);
+  abstract void accumulateSpaces(DatanodeStorageReport r);
 
 
   void initAvgUtilization() {
   void initAvgUtilization() {
-    this.avgUtilization = totalUsedSpace*100.0/totalCapacity;
+    for(StorageType t : StorageType.asList()) {
+      final long capacity = totalCapacities.get(t);
+      if (capacity > 0L) {
+        final double avg  = totalUsedSpaces.get(t)*100.0/capacity;
+        avgUtilizations.set(t, avg);
+      }
+    }
   }
   }
-  double getAvgUtilization() {
-    return avgUtilization;
+
+  double getAvgUtilization(StorageType t) {
+    return avgUtilizations.get(t);
   }
   }
 
 
-  /** Return the utilization of a datanode */
-  abstract double getUtilization(DatanodeInfo d);
+  /** @return the utilization of a particular storage type of a datanode;
+   *          or return null if the datanode does not have such storage type.
+   */
+  abstract Double getUtilization(DatanodeStorageReport r, StorageType t);
   
   
   @Override
   @Override
   public String toString() {
   public String toString() {
@@ -84,14 +100,25 @@ abstract class BalancingPolicy {
     }
     }
 
 
     @Override
     @Override
-    void accumulateSpaces(DatanodeInfo d) {
-      totalCapacity += d.getCapacity();
-      totalUsedSpace += d.getDfsUsed();  
+    void accumulateSpaces(DatanodeStorageReport r) {
+      for(StorageReport s : r.getStorageReports()) {
+        final StorageType t = s.getStorage().getStorageType();
+        totalCapacities.add(t, s.getCapacity());
+        totalUsedSpaces.add(t, s.getDfsUsed());
+      }
     }
     }
     
     
     @Override
     @Override
-    double getUtilization(DatanodeInfo d) {
-      return d.getDfsUsed()*100.0/d.getCapacity();
+    Double getUtilization(DatanodeStorageReport r, final StorageType t) {
+      long capacity = 0L;
+      long dfsUsed = 0L;
+      for(StorageReport s : r.getStorageReports()) {
+        if (s.getStorage().getStorageType() == t) {
+          capacity += s.getCapacity();
+          dfsUsed += s.getDfsUsed();
+        }
+      }
+      return capacity == 0L? null: dfsUsed*100.0/capacity;
     }
     }
   }
   }
 
 
@@ -108,14 +135,25 @@ abstract class BalancingPolicy {
     }
     }
 
 
     @Override
     @Override
-    void accumulateSpaces(DatanodeInfo d) {
-      totalCapacity += d.getCapacity();
-      totalUsedSpace += d.getBlockPoolUsed();  
+    void accumulateSpaces(DatanodeStorageReport r) {
+      for(StorageReport s : r.getStorageReports()) {
+        final StorageType t = s.getStorage().getStorageType();
+        totalCapacities.add(t, s.getCapacity());
+        totalUsedSpaces.add(t, s.getBlockPoolUsed());
+      }
     }
     }
 
 
     @Override
     @Override
-    double getUtilization(DatanodeInfo d) {
-      return d.getBlockPoolUsed()*100.0/d.getCapacity();
+    Double getUtilization(DatanodeStorageReport r, final StorageType t) {
+      long capacity = 0L;
+      long blockPoolUsed = 0L;
+      for(StorageReport s : r.getStorageReports()) {
+        if (s.getStorage().getStorageType() == t) {
+          capacity += s.getCapacity();
+          blockPoolUsed += s.getBlockPoolUsed();
+        }
+      }
+      return capacity == 0L? null: blockPoolUsed*100.0/capacity;
    }
  }
}
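The rewritten policy aggregates capacity and usage per StorageType and reports a node's utilization as null for storage types it does not carry, so the Balancer can skip them. The EnumCounters/EnumDoubles helpers are HDFS-internal; the sketch below reproduces the same per-type average with plain collections and made-up sample numbers, purely as an illustration of the arithmetic in initAvgUtilization() and getUtilization().

    import java.util.EnumMap;
    import java.util.Map;
    import org.apache.hadoop.hdfs.StorageType;

    public class PerTypeUtilizationExample {
      public static void main(String[] args) {
        // {capacity, dfsUsed} per storage type; the numbers are arbitrary samples.
        Map<StorageType, long[]> space = new EnumMap<StorageType, long[]>(StorageType.class);
        space.put(StorageType.DISK, new long[] {400L, 100L});
        space.put(StorageType.SSD,  new long[] {0L, 0L});

        for (StorageType t : StorageType.asList()) {   // asList() was added above
          long[] s = space.get(t);
          if (s == null || s[0] == 0L) {
            continue;  // mirrors the null return: no usable storage of this type
          }
          double utilization = s[1] * 100.0 / s[0];    // DISK -> 25.0 percent
          System.out.println(t + " utilization = " + utilization);
        }
      }
    }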

+ 103 - 71
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1082,6 +1082,7 @@ public class BlockManager {
    * Mark the block belonging to datanode as corrupt
    * Mark the block belonging to datanode as corrupt
    * @param blk Block to be marked as corrupt
    * @param blk Block to be marked as corrupt
    * @param dn Datanode which holds the corrupt replica
    * @param dn Datanode which holds the corrupt replica
+   * @param storageID if known, null otherwise.
    * @param reason a textual reason why the block should be marked corrupt,
    * @param reason a textual reason why the block should be marked corrupt,
    * for logging purposes
    * for logging purposes
    */
    */
@@ -1098,19 +1099,29 @@ public class BlockManager {
           + blk + " not found");
           + blk + " not found");
       return;
       return;
     }
     }
-    markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock,
-        blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
-        dn, storageID);
-  }
 
 
-  private void markBlockAsCorrupt(BlockToMarkCorrupt b,
-      DatanodeInfo dn, String storageID) throws IOException {
     DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
     if (node == null) {
     if (node == null) {
-      throw new IOException("Cannot mark " + b
+      throw new IOException("Cannot mark " + blk
           + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
           + " as corrupt because datanode " + dn + " (" + dn.getDatanodeUuid()
           + ") does not exist");
           + ") does not exist");
     }
     }
+    
+    markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock,
+            blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
+        storageID == null ? null : node.getStorageInfo(storageID),
+        node);
+  }
+
+  /**
+   * 
+   * @param b
+   * @param storageInfo storage that contains the block, if known. null otherwise.
+   * @throws IOException
+   */
+  private void markBlockAsCorrupt(BlockToMarkCorrupt b,
+      DatanodeStorageInfo storageInfo,
+      DatanodeDescriptor node) throws IOException {
 
 
     BlockCollection bc = b.corrupted.getBlockCollection();
     BlockCollection bc = b.corrupted.getBlockCollection();
     if (bc == null) {
     if (bc == null) {
@@ -1121,7 +1132,9 @@ public class BlockManager {
     } 
     } 
 
 
     // Add replica to the data-node if it is not already there
     // Add replica to the data-node if it is not already there
-    node.addBlock(storageID, b.stored);
+    if (storageInfo != null) {
+      storageInfo.addBlock(b.stored);
+    }
 
 
     // Add this replica to corruptReplicas Map
     // Add this replica to corruptReplicas Map
     corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason,
     corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason,
@@ -1460,7 +1473,7 @@ public class BlockManager {
    * @throws IOException
    * @throws IOException
    *           if the number of targets < minimum replication.
    *           if the number of targets < minimum replication.
    * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
    * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
-   *      List, boolean, Set, long)
+   *      List, boolean, Set, long, StorageType)
    */
    */
   public DatanodeStorageInfo[] chooseTarget(final String src,
   public DatanodeStorageInfo[] chooseTarget(final String src,
       final int numOfReplicas, final DatanodeDescriptor client,
       final int numOfReplicas, final DatanodeDescriptor client,
@@ -1697,7 +1710,7 @@ public class BlockManager {
    * @throws IOException
    * @throws IOException
    */
    */
   public boolean processReport(final DatanodeID nodeID,
   public boolean processReport(final DatanodeID nodeID,
-      final DatanodeStorage storage, final String poolId,
+      final DatanodeStorage storage,
       final BlockListAsLongs newReport) throws IOException {
       final BlockListAsLongs newReport) throws IOException {
     namesystem.writeLock();
     namesystem.writeLock();
     final long startTime = Time.now(); //after acquiring write lock
     final long startTime = Time.now(); //after acquiring write lock
@@ -1729,9 +1742,9 @@ public class BlockManager {
       if (storageInfo.numBlocks() == 0) {
       if (storageInfo.numBlocks() == 0) {
         // The first block report can be processed a lot more efficiently than
         // The first block report can be processed a lot more efficiently than
         // ordinary block reports.  This shortens restart times.
         // ordinary block reports.  This shortens restart times.
-        processFirstBlockReport(node, storage.getStorageID(), newReport);
+        processFirstBlockReport(storageInfo, newReport);
       } else {
       } else {
-        processReport(node, storage, newReport);
+        processReport(storageInfo, newReport);
       }
       }
       
       
       // Now that we have an up-to-date block report, we know that any
       // Now that we have an up-to-date block report, we know that any
@@ -1793,9 +1806,8 @@ public class BlockManager {
     }
     }
   }
   }
   
   
-  private void processReport(final DatanodeDescriptor node,
-      final DatanodeStorage storage,
-      final BlockListAsLongs report) throws IOException {
+  private void processReport(final DatanodeStorageInfo storageInfo,
+                             final BlockListAsLongs report) throws IOException {
     // Normal case:
     // Normal case:
     // Modify the (block-->datanode) map, according to the difference
     // Modify the (block-->datanode) map, according to the difference
     // between the old and new block report.
     // between the old and new block report.
@@ -1805,19 +1817,20 @@ public class BlockManager {
     Collection<Block> toInvalidate = new LinkedList<Block>();
     Collection<Block> toInvalidate = new LinkedList<Block>();
     Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
     Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
     Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
     Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
-    reportDiff(node, storage, report,
+    reportDiff(storageInfo, report,
         toAdd, toRemove, toInvalidate, toCorrupt, toUC);
         toAdd, toRemove, toInvalidate, toCorrupt, toUC);
-
+   
+    DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     // Process the blocks on each queue
     // Process the blocks on each queue
     for (StatefulBlockInfo b : toUC) { 
     for (StatefulBlockInfo b : toUC) { 
-      addStoredBlockUnderConstruction(b, node, storage.getStorageID());
+      addStoredBlockUnderConstruction(b, storageInfo);
     }
     }
     for (Block b : toRemove) {
     for (Block b : toRemove) {
       removeStoredBlock(b, node);
       removeStoredBlock(b, node);
     }
     }
     int numBlocksLogged = 0;
     int numBlocksLogged = 0;
     for (BlockInfo b : toAdd) {
     for (BlockInfo b : toAdd) {
-      addStoredBlock(b, node, storage.getStorageID(), null, numBlocksLogged < maxNumBlocksToLog);
+      addStoredBlock(b, storageInfo, null, numBlocksLogged < maxNumBlocksToLog);
       numBlocksLogged++;
       numBlocksLogged++;
     }
     }
     if (numBlocksLogged > maxNumBlocksToLog) {
     if (numBlocksLogged > maxNumBlocksToLog) {
@@ -1831,7 +1844,7 @@ public class BlockManager {
       addToInvalidates(b, node);
       addToInvalidates(b, node);
     }
     }
     for (BlockToMarkCorrupt b : toCorrupt) {
     for (BlockToMarkCorrupt b : toCorrupt) {
-      markBlockAsCorrupt(b, node, storage.getStorageID());
+      markBlockAsCorrupt(b, storageInfo, node);
     }
     }
   }
   }
 
 
@@ -1842,16 +1855,16 @@ public class BlockManager {
    * a toRemove list (since there won't be any).  It also silently discards 
    * a toRemove list (since there won't be any).  It also silently discards 
    * any invalid blocks, thereby deferring their processing until 
    * any invalid blocks, thereby deferring their processing until 
    * the next block report.
    * the next block report.
-   * @param node - DatanodeDescriptor of the node that sent the report
+   * @param storageInfo - DatanodeStorageInfo that sent the report
    * @param report - the initial block report, to be processed
    * @param report - the initial block report, to be processed
    * @throws IOException 
    * @throws IOException 
    */
    */
-  private void processFirstBlockReport(final DatanodeDescriptor node,
-      final String storageID,
+  private void processFirstBlockReport(
+      final DatanodeStorageInfo storageInfo,
       final BlockListAsLongs report) throws IOException {
       final BlockListAsLongs report) throws IOException {
     if (report == null) return;
     if (report == null) return;
     assert (namesystem.hasWriteLock());
     assert (namesystem.hasWriteLock());
-    assert (node.getStorageInfo(storageID).numBlocks() == 0);
+    assert (storageInfo.numBlocks() == 0);
     BlockReportIterator itBR = report.getBlockReportIterator();
     BlockReportIterator itBR = report.getBlockReportIterator();
 
 
     while(itBR.hasNext()) {
     while(itBR.hasNext()) {
@@ -1860,7 +1873,7 @@ public class BlockManager {
       
       
       if (shouldPostponeBlocksFromFuture &&
       if (shouldPostponeBlocksFromFuture &&
           namesystem.isGenStampInFuture(iblk)) {
           namesystem.isGenStampInFuture(iblk)) {
-        queueReportedBlock(node, storageID, iblk, reportedState,
+        queueReportedBlock(storageInfo, iblk, reportedState,
             QUEUE_REASON_FUTURE_GENSTAMP);
             QUEUE_REASON_FUTURE_GENSTAMP);
         continue;
         continue;
       }
       }
@@ -1872,15 +1885,16 @@ public class BlockManager {
       // If block is corrupt, mark it and continue to next block.
       // If block is corrupt, mark it and continue to next block.
       BlockUCState ucState = storedBlock.getBlockUCState();
       BlockUCState ucState = storedBlock.getBlockUCState();
       BlockToMarkCorrupt c = checkReplicaCorrupt(
       BlockToMarkCorrupt c = checkReplicaCorrupt(
-          iblk, reportedState, storedBlock, ucState, node);
+          iblk, reportedState, storedBlock, ucState,
+          storageInfo.getDatanodeDescriptor());
       if (c != null) {
       if (c != null) {
         if (shouldPostponeBlocksFromFuture) {
         if (shouldPostponeBlocksFromFuture) {
           // In the Standby, we may receive a block report for a file that we
           // In the Standby, we may receive a block report for a file that we
           // just have an out-of-date gen-stamp or state for, for example.
           // just have an out-of-date gen-stamp or state for, for example.
-          queueReportedBlock(node, storageID, iblk, reportedState,
+          queueReportedBlock(storageInfo, iblk, reportedState,
               QUEUE_REASON_CORRUPT_STATE);
               QUEUE_REASON_CORRUPT_STATE);
         } else {
         } else {
-          markBlockAsCorrupt(c, node, storageID);
+          markBlockAsCorrupt(c, storageInfo, storageInfo.getDatanodeDescriptor());
         }
         }
         continue;
         continue;
       }
       }
@@ -1888,7 +1902,7 @@ public class BlockManager {
       // If block is under construction, add this replica to its list
       // If block is under construction, add this replica to its list
       if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
       if (isBlockUnderConstruction(storedBlock, ucState, reportedState)) {
         ((BlockInfoUnderConstruction)storedBlock).addReplicaIfNotPresent(
         ((BlockInfoUnderConstruction)storedBlock).addReplicaIfNotPresent(
-            node.getStorageInfo(storageID), iblk, reportedState);
+            storageInfo, iblk, reportedState);
         // OpenFileBlocks only inside snapshots also will be added to safemode
         // OpenFileBlocks only inside snapshots also will be added to safemode
         // threshold. So we need to update such blocks to safemode
         // threshold. So we need to update such blocks to safemode
         // refer HDFS-5283
         // refer HDFS-5283
@@ -1901,12 +1915,12 @@ public class BlockManager {
       }      
       }      
       //add replica if appropriate
       //add replica if appropriate
       if (reportedState == ReplicaState.FINALIZED) {
       if (reportedState == ReplicaState.FINALIZED) {
-        addStoredBlockImmediate(storedBlock, node, storageID);
+        addStoredBlockImmediate(storedBlock, storageInfo);
       }
       }
     }
     }
   }
   }
 
 
-  private void reportDiff(DatanodeDescriptor dn, DatanodeStorage storage, 
+  private void reportDiff(DatanodeStorageInfo storageInfo, 
       BlockListAsLongs newReport, 
       BlockListAsLongs newReport, 
       Collection<BlockInfo> toAdd,              // add to DatanodeDescriptor
       Collection<BlockInfo> toAdd,              // add to DatanodeDescriptor
       Collection<Block> toRemove,           // remove from DatanodeDescriptor
       Collection<Block> toRemove,           // remove from DatanodeDescriptor
@@ -1914,8 +1928,6 @@ public class BlockManager {
       Collection<BlockToMarkCorrupt> toCorrupt, // add to corrupt replicas list
       Collection<BlockToMarkCorrupt> toCorrupt, // add to corrupt replicas list
       Collection<StatefulBlockInfo> toUC) { // add to under-construction list
       Collection<StatefulBlockInfo> toUC) { // add to under-construction list
 
 
-    final DatanodeStorageInfo storageInfo = dn.getStorageInfo(storage.getStorageID());
-
     // place a delimiter in the list which separates blocks 
     // place a delimiter in the list which separates blocks 
     // that have been reported from those that have not
     // that have been reported from those that have not
     BlockInfo delimiter = new BlockInfo(new Block(), 1);
     BlockInfo delimiter = new BlockInfo(new Block(), 1);
@@ -1932,7 +1944,7 @@ public class BlockManager {
     while(itBR.hasNext()) {
     while(itBR.hasNext()) {
       Block iblk = itBR.next();
       Block iblk = itBR.next();
       ReplicaState iState = itBR.getCurrentReplicaState();
       ReplicaState iState = itBR.getCurrentReplicaState();
-      BlockInfo storedBlock = processReportedBlock(dn, storage.getStorageID(),
+      BlockInfo storedBlock = processReportedBlock(storageInfo,
           iblk, iState, toAdd, toInvalidate, toCorrupt, toUC);
           iblk, iState, toAdd, toInvalidate, toCorrupt, toUC);
 
 
       // move block to the head of the list
       // move block to the head of the list
@@ -1969,7 +1981,7 @@ public class BlockManager {
    * BlockInfoUnderConstruction's list of replicas.</li>
    * BlockInfoUnderConstruction's list of replicas.</li>
    * </ol>
    * </ol>
    * 
    * 
-   * @param dn descriptor for the datanode that made the report
+   * @param storageInfo DatanodeStorageInfo that sent the report.
    * @param block reported block replica
    * @param block reported block replica
    * @param reportedState reported replica state
    * @param reportedState reported replica state
    * @param toAdd add to DatanodeDescriptor
    * @param toAdd add to DatanodeDescriptor
@@ -1981,14 +1993,16 @@ public class BlockManager {
    * @return the up-to-date stored block, if it should be kept.
    * @return the up-to-date stored block, if it should be kept.
    *         Otherwise, null.
    *         Otherwise, null.
    */
    */
-  private BlockInfo processReportedBlock(final DatanodeDescriptor dn,
-      final String storageID,
+  private BlockInfo processReportedBlock(
+      final DatanodeStorageInfo storageInfo,
       final Block block, final ReplicaState reportedState, 
       final Block block, final ReplicaState reportedState, 
       final Collection<BlockInfo> toAdd, 
       final Collection<BlockInfo> toAdd, 
       final Collection<Block> toInvalidate, 
       final Collection<Block> toInvalidate, 
       final Collection<BlockToMarkCorrupt> toCorrupt,
       final Collection<BlockToMarkCorrupt> toCorrupt,
       final Collection<StatefulBlockInfo> toUC) {
       final Collection<StatefulBlockInfo> toUC) {
     
     
+    DatanodeDescriptor dn = storageInfo.getDatanodeDescriptor();
+
     if(LOG.isDebugEnabled()) {
     if(LOG.isDebugEnabled()) {
       LOG.debug("Reported block " + block
       LOG.debug("Reported block " + block
           + " on " + dn + " size " + block.getNumBytes()
           + " on " + dn + " size " + block.getNumBytes()
@@ -1997,7 +2011,7 @@ public class BlockManager {
   
   
     if (shouldPostponeBlocksFromFuture &&
     if (shouldPostponeBlocksFromFuture &&
         namesystem.isGenStampInFuture(block)) {
         namesystem.isGenStampInFuture(block)) {
-      queueReportedBlock(dn, storageID, block, reportedState,
+      queueReportedBlock(storageInfo, block, reportedState,
           QUEUE_REASON_FUTURE_GENSTAMP);
           QUEUE_REASON_FUTURE_GENSTAMP);
       return null;
       return null;
     }
     }
@@ -2037,7 +2051,7 @@ public class BlockManager {
         // TODO: Pretty confident this should be s/storedBlock/block below,
         // TODO: Pretty confident this should be s/storedBlock/block below,
         // since we should be postponing the info of the reported block, not
         // since we should be postponing the info of the reported block, not
         // the stored block. See HDFS-6289 for more context.
         // the stored block. See HDFS-6289 for more context.
-        queueReportedBlock(dn, storageID, storedBlock, reportedState,
+        queueReportedBlock(storageInfo, storedBlock, reportedState,
             QUEUE_REASON_CORRUPT_STATE);
             QUEUE_REASON_CORRUPT_STATE);
       } else {
       } else {
         toCorrupt.add(c);
         toCorrupt.add(c);
@@ -2066,17 +2080,17 @@ public class BlockManager {
    * standby node. @see PendingDataNodeMessages.
    * standby node. @see PendingDataNodeMessages.
    * @param reason a textual reason to report in the debug logs
    * @param reason a textual reason to report in the debug logs
    */
    */
-  private void queueReportedBlock(DatanodeDescriptor dn, String storageID, Block block,
+  private void queueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
       ReplicaState reportedState, String reason) {
       ReplicaState reportedState, String reason) {
     assert shouldPostponeBlocksFromFuture;
     assert shouldPostponeBlocksFromFuture;
     
     
     if (LOG.isDebugEnabled()) {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Queueing reported block " + block +
       LOG.debug("Queueing reported block " + block +
           " in state " + reportedState + 
           " in state " + reportedState + 
-          " from datanode " + dn + " for later processing " +
-          "because " + reason + ".");
+          " from datanode " + storageInfo.getDatanodeDescriptor() +
+          " for later processing because " + reason + ".");
     }
     }
-    pendingDNMessages.enqueueReportedBlock(dn, storageID, block, reportedState);
+    pendingDNMessages.enqueueReportedBlock(storageInfo, block, reportedState);
   }
   }
 
 
   /**
   /**
@@ -2099,7 +2113,7 @@ public class BlockManager {
       if (LOG.isDebugEnabled()) {
       if (LOG.isDebugEnabled()) {
        LOG.debug("Processing previously queued message " + rbi);
       }
       }
-      processAndHandleReportedBlock(rbi.getNode(), rbi.getStorageID(), 
+      processAndHandleReportedBlock(rbi.getStorageInfo(), 
           rbi.getBlock(), rbi.getReportedState(), null);
           rbi.getBlock(), rbi.getReportedState(), null);
     }
     }
   }
   }
@@ -2156,6 +2170,16 @@ public class BlockManager {
         } else {
         } else {
           return null; // not corrupt
           return null; // not corrupt
         }
         }
+      case UNDER_CONSTRUCTION:
+        if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) {
+          final long reportedGS = reported.getGenerationStamp();
+          return new BlockToMarkCorrupt(storedBlock, reportedGS, "block is "
+              + ucState + " and reported state " + reportedState
+              + ", But reported genstamp " + reportedGS
+              + " does not match genstamp in block map "
+              + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+        }
+        return null;
       default:
       default:
         return null;
         return null;
       }
       }
@@ -2219,19 +2243,20 @@ public class BlockManager {
   }
   }
 
 
   void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
   void addStoredBlockUnderConstruction(StatefulBlockInfo ucBlock,
-      DatanodeDescriptor node, String storageID) throws IOException {
+      DatanodeStorageInfo storageInfo) throws IOException {
     BlockInfoUnderConstruction block = ucBlock.storedBlock;
     BlockInfoUnderConstruction block = ucBlock.storedBlock;
-    block.addReplicaIfNotPresent(node.getStorageInfo(storageID),
-        ucBlock.reportedBlock, ucBlock.reportedState);
+    block.addReplicaIfNotPresent(
+        storageInfo, ucBlock.reportedBlock, ucBlock.reportedState);
 
 
-    if (ucBlock.reportedState == ReplicaState.FINALIZED && block.findDatanode(node) < 0) {
-      addStoredBlock(block, node, storageID, null, true);
+    if (ucBlock.reportedState == ReplicaState.FINALIZED &&
+        block.findDatanode(storageInfo.getDatanodeDescriptor()) < 0) {
+      addStoredBlock(block, storageInfo, null, true);
     }
     }
   } 
   } 
 
 
   /**
   /**
    * Faster version of
    * Faster version of
-   * {@link #addStoredBlock(BlockInfo, DatanodeDescriptor, String, DatanodeDescriptor, boolean)}
+   * {@link #addStoredBlock(BlockInfo, DatanodeStorageInfo, DatanodeDescriptor, boolean)}
    * , intended for use with initial block report at startup. If not in startup
    * , intended for use with initial block report at startup. If not in startup
    * safe mode, will call standard addStoredBlock(). Assumes this method is
    * safe mode, will call standard addStoredBlock(). Assumes this method is
    * called "immediately" so there is no need to refresh the storedBlock from
    * called "immediately" so there is no need to refresh the storedBlock from
@@ -2242,17 +2267,17 @@ public class BlockManager {
    * @throws IOException
    * @throws IOException
    */
    */
   private void addStoredBlockImmediate(BlockInfo storedBlock,
   private void addStoredBlockImmediate(BlockInfo storedBlock,
-      DatanodeDescriptor node, String storageID)
+      DatanodeStorageInfo storageInfo)
   throws IOException {
   throws IOException {
     assert (storedBlock != null && namesystem.hasWriteLock());
     assert (storedBlock != null && namesystem.hasWriteLock());
     if (!namesystem.isInStartupSafeMode() 
     if (!namesystem.isInStartupSafeMode() 
         || namesystem.isPopulatingReplQueues()) {
         || namesystem.isPopulatingReplQueues()) {
-      addStoredBlock(storedBlock, node, storageID, null, false);
+      addStoredBlock(storedBlock, storageInfo, null, false);
       return;
       return;
     }
     }
 
 
     // just add it
     // just add it
-    node.addBlock(storageID, storedBlock);
+    storageInfo.addBlock(storedBlock);
 
 
     // Now check for completion of blocks and safe block count
     // Now check for completion of blocks and safe block count
     int numCurrentReplica = countLiveNodes(storedBlock);
     int numCurrentReplica = countLiveNodes(storedBlock);
@@ -2274,13 +2299,13 @@ public class BlockManager {
   * @return the block that is stored in blockMap.
   */
  private Block addStoredBlock(final BlockInfo block,
-                               DatanodeDescriptor node,
-                               String storageID,
+                               DatanodeStorageInfo storageInfo,
                               DatanodeDescriptor delNodeHint,
                               boolean logEveryBlock)
  throws IOException {
    assert block != null && namesystem.hasWriteLock();
    BlockInfo storedBlock;
+    DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
    if (block instanceof BlockInfoUnderConstruction) {
      //refresh our copy in case the block got completed in another thread
      storedBlock = blocksMap.getStoredBlock(block);
@@ -2300,7 +2325,7 @@ public class BlockManager {
    assert bc != null : "Block must belong to a file";

    // add block to the datanode
-    boolean added = node.addBlock(storageID, storedBlock);
+    boolean added = storageInfo.addBlock(storedBlock);

    int curReplicaDelta;
    if (added) {
@@ -2829,12 +2854,15 @@ public class BlockManager {
    } else {
      final String[] datanodeUuids = new String[locations.size()];
      final String[] storageIDs = new String[datanodeUuids.length];
+      final StorageType[] storageTypes = new StorageType[datanodeUuids.length];
      for(int i = 0; i < locations.size(); i++) {
        final DatanodeStorageInfo s = locations.get(i);
        datanodeUuids[i] = s.getDatanodeDescriptor().getDatanodeUuid();
        storageIDs[i] = s.getStorageID();
+        storageTypes[i] = s.getStorageType();
      }
-      results.add(new BlockWithLocations(block, datanodeUuids, storageIDs));
+      results.add(new BlockWithLocations(block, datanodeUuids, storageIDs,
+          storageTypes));
      return block.getNumBytes();
    }
  }
@@ -2843,8 +2871,9 @@ public class BlockManager {
   * The given node is reporting that it received a certain block.
   */
  @VisibleForTesting
-  void addBlock(DatanodeDescriptor node, String storageID, Block block, String delHint)
+  void addBlock(DatanodeStorageInfo storageInfo, Block block, String delHint)
      throws IOException {
+    DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
    // Decrement number of blocks scheduled to this datanode.
    // for a retry request (of DatanodeProtocol#blockReceivedAndDeleted with 
    // RECEIVED_BLOCK), we currently also decrease the approximate number. 
@@ -2864,12 +2893,12 @@ public class BlockManager {
    // Modify the blocks->datanode map and node's map.
    //
    pendingReplications.decrement(block, node);
-    processAndHandleReportedBlock(node, storageID, block, ReplicaState.FINALIZED,
+    processAndHandleReportedBlock(storageInfo, block, ReplicaState.FINALIZED,
        delHintNode);
  }
  
-  private void processAndHandleReportedBlock(DatanodeDescriptor node,
-      String storageID, Block block,
+  private void processAndHandleReportedBlock(
+      DatanodeStorageInfo storageInfo, Block block,
      ReplicaState reportedState, DatanodeDescriptor delHintNode)
      throws IOException {
    // blockReceived reports a finalized block
@@ -2877,7 +2906,9 @@ public class BlockManager {
    Collection<Block> toInvalidate = new LinkedList<Block>();
    Collection<BlockToMarkCorrupt> toCorrupt = new LinkedList<BlockToMarkCorrupt>();
    Collection<StatefulBlockInfo> toUC = new LinkedList<StatefulBlockInfo>();
-    processReportedBlock(node, storageID, block, reportedState,
+    final DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
+
+    processReportedBlock(storageInfo, block, reportedState,
                              toAdd, toInvalidate, toCorrupt, toUC);
    // the block is only in one of the to-do lists
    // if it is in none then data-node already has it
@@ -2885,11 +2916,11 @@ public class BlockManager {
       : "The block should be only in one of the lists.";
       : "The block should be only in one of the lists.";
 
 
     for (StatefulBlockInfo b : toUC) { 
     for (StatefulBlockInfo b : toUC) { 
-      addStoredBlockUnderConstruction(b, node, storageID);
+      addStoredBlockUnderConstruction(b, storageInfo);
     }
     }
     long numBlocksLogged = 0;
     long numBlocksLogged = 0;
     for (BlockInfo b : toAdd) {
     for (BlockInfo b : toAdd) {
-      addStoredBlock(b, node, storageID, delHintNode, numBlocksLogged < maxNumBlocksToLog);
+      addStoredBlock(b, storageInfo, delHintNode, numBlocksLogged < maxNumBlocksToLog);
       numBlocksLogged++;
       numBlocksLogged++;
     }
     }
     if (numBlocksLogged > maxNumBlocksToLog) {
     if (numBlocksLogged > maxNumBlocksToLog) {
@@ -2903,7 +2934,7 @@ public class BlockManager {
      addToInvalidates(b, node);
    }
    for (BlockToMarkCorrupt b : toCorrupt) {
-      markBlockAsCorrupt(b, node, storageID);
+      markBlockAsCorrupt(b, storageInfo, node);
    }
  }

@@ -2930,13 +2961,15 @@ public class BlockManager {
           "Got incremental block report from unregistered or dead node");
           "Got incremental block report from unregistered or dead node");
     }
     }
 
 
-    if (node.getStorageInfo(srdb.getStorage().getStorageID()) == null) {
+    DatanodeStorageInfo storageInfo =
+        node.getStorageInfo(srdb.getStorage().getStorageID());
+    if (storageInfo == null) {
       // The DataNode is reporting an unknown storage. Usually the NN learns
       // The DataNode is reporting an unknown storage. Usually the NN learns
       // about new storages from heartbeats but during NN restart we may
       // about new storages from heartbeats but during NN restart we may
       // receive a block report or incremental report before the heartbeat.
       // receive a block report or incremental report before the heartbeat.
       // We must handle this for protocol compatibility. This issue was
       // We must handle this for protocol compatibility. This issue was
       // uncovered by HDFS-6094.
       // uncovered by HDFS-6094.
-      node.updateStorage(srdb.getStorage());
+      storageInfo = node.updateStorage(srdb.getStorage());
     }
     }
 
 
     for (ReceivedDeletedBlockInfo rdbi : srdb.getBlocks()) {
     for (ReceivedDeletedBlockInfo rdbi : srdb.getBlocks()) {
@@ -2946,14 +2979,13 @@ public class BlockManager {
        deleted++;
        break;
      case RECEIVED_BLOCK:
-        addBlock(node, srdb.getStorage().getStorageID(),
-            rdbi.getBlock(), rdbi.getDelHints());
+        addBlock(storageInfo, rdbi.getBlock(), rdbi.getDelHints());
        received++;
        break;
      case RECEIVING_BLOCK:
        receiving++;
-        processAndHandleReportedBlock(node, srdb.getStorage().getStorageID(),
-            rdbi.getBlock(), ReplicaState.RBW, null);
+        processAndHandleReportedBlock(storageInfo, rdbi.getBlock(),
+                                      ReplicaState.RBW, null);
        break;
      default:
        String msg = 

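The BlockManager hunks above consistently replace the (DatanodeDescriptor, String storageID) parameter pair with a single DatanodeStorageInfo and derive the descriptor from it where needed, which also removes the repeated node.getStorageInfo(storageID) lookups. A rough, self-contained sketch of that calling convention; the class names below are illustrative stand-ins, not the HDFS types:

// Illustrative stand-ins only (not the HDFS classes): a handler that accepts a
// single storage handle and derives the node from it, mirroring the hunks above.
import java.util.Objects;

class StorageHandleSketch {

  static final class Node {                       // plays the DatanodeDescriptor role
    final String uuid;
    Node(String uuid) { this.uuid = uuid; }
  }

  static final class Storage {                    // plays the DatanodeStorageInfo role
    private final Node node;
    private final String storageId;
    Storage(Node node, String storageId) {
      this.node = Objects.requireNonNull(node);
      this.storageId = Objects.requireNonNull(storageId);
    }
    Node getNode() { return node; }               // like getDatanodeDescriptor()
    String getStorageId() { return storageId; }
  }

  // Old shape: node and storage ID travel separately and have to be re-joined
  // with a lookup at every layer.
  static void addBlockOld(Node node, String storageId, long blockId) {
    System.out.println("block " + blockId + " on " + node.uuid + "/" + storageId);
  }

  // New shape: one handle carries both, and the node is derived where needed.
  static void addBlockNew(Storage storage, long blockId) {
    Node node = storage.getNode();
    System.out.println("block " + blockId + " on " + node.uuid + "/" + storage.getStorageId());
  }

  public static void main(String[] args) {
    Storage s = new Storage(new Node("dn-1"), "DS-1234");
    addBlockOld(s.getNode(), s.getStorageId(), 42L);
    addBlockNew(s, 42L);
  }
}
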
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -260,8 +260,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
  }

  public StorageReport[] getStorageReports() {
-    final StorageReport[] reports = new StorageReport[storageMap.size()];
    final DatanodeStorageInfo[] infos = getStorageInfos();
+    final StorageReport[] reports = new StorageReport[infos.length];
    for(int i = 0; i < infos.length; i++) {
      reports[i] = infos[i].toStorageReport();
    }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java

@@ -207,7 +207,7 @@ public class DatanodeStorageInfo {
    return blockPoolUsed;
  }

-  boolean addBlock(BlockInfo b) {
+  public boolean addBlock(BlockInfo b) {
    if(!b.addStorage(this))
      return false;
    // add to the head of the data-node list

+ 13 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingDataNodeMessages.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;

 /**
  * In the Standby Node, we can receive messages about blocks
@@ -41,14 +42,12 @@ class PendingDataNodeMessages {
     
     
  static class ReportedBlockInfo {
    private final Block block;
-    private final DatanodeDescriptor dn;
-    private final String storageID;
+    private final DatanodeStorageInfo storageInfo;
    private final ReplicaState reportedState;

-    ReportedBlockInfo(DatanodeDescriptor dn, String storageID, Block block,
+    ReportedBlockInfo(DatanodeStorageInfo storageInfo, Block block,
        ReplicaState reportedState) {
-      this.dn = dn;
-      this.storageID = storageID;
+      this.storageInfo = storageInfo;
      this.block = block;
      this.reportedState = reportedState;
    }
@@ -57,21 +56,18 @@ class PendingDataNodeMessages {
      return block;
    }

-    DatanodeDescriptor getNode() {
-      return dn;
-    }
-    
-    String getStorageID() {
-      return storageID;
-    }
-
    ReplicaState getReportedState() {
      return reportedState;
    }
+    
+    DatanodeStorageInfo getStorageInfo() {
+      return storageInfo;
+    }

    @Override
    public String toString() {
-      return "ReportedBlockInfo [block=" + block + ", dn=" + dn
+      return "ReportedBlockInfo [block=" + block + ", dn="
+          + storageInfo.getDatanodeDescriptor()
          + ", reportedState=" + reportedState + "]";
    }
  }
@@ -87,7 +83,7 @@ class PendingDataNodeMessages {
      Queue<ReportedBlockInfo> oldQueue = entry.getValue();
      while (!oldQueue.isEmpty()) {
        ReportedBlockInfo rbi = oldQueue.remove();
-        if (!rbi.getNode().equals(dn)) {
+        if (!rbi.getStorageInfo().getDatanodeDescriptor().equals(dn)) {
          newQueue.add(rbi);
        } else {
          count--;
@@ -97,11 +93,11 @@ class PendingDataNodeMessages {
    }
  }
  
-  void enqueueReportedBlock(DatanodeDescriptor dn, String storageID, Block block,
+  void enqueueReportedBlock(DatanodeStorageInfo storageInfo, Block block,
      ReplicaState reportedState) {
    block = new Block(block);
    getBlockQueue(block).add(
-        new ReportedBlockInfo(dn, storageID, block, reportedState));
+        new ReportedBlockInfo(storageInfo, block, reportedState));
    count++;
  }
  

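ReportedBlockInfo now queues the DatanodeStorageInfo itself rather than a (descriptor, storage ID) pair, and pruning for a dead datanode goes through getStorageInfo().getDatanodeDescriptor(). A hedged, simplified sketch of the same queue-and-prune pattern, using made-up record and method names rather than the HDFS classes (a Java record is used here purely for brevity):

// Simplified, hypothetical sketch of the pattern above: each queued report keeps
// its storage handle, and pruning matches on the node derived from that handle.
import java.util.ArrayDeque;
import java.util.Queue;

class PendingReportsSketch {

  record Report(String datanodeUuid, String storageId, long blockId) { }

  private final Queue<Report> queue = new ArrayDeque<>();

  void enqueue(Report r) {
    queue.add(r);
  }

  // Drop every report from one datanode, no matter which of its storages
  // reported it; the per-storage detail stays available for normal processing.
  void removeAllFrom(String datanodeUuid) {
    queue.removeIf(r -> r.datanodeUuid().equals(datanodeUuid));
  }

  public static void main(String[] args) {
    PendingReportsSketch pending = new PendingReportsSketch();
    pending.enqueue(new Report("dn-1", "DS-a", 1L));
    pending.enqueue(new Report("dn-2", "DS-b", 2L));
    pending.removeAllFrom("dn-1");
    System.out.println(pending.queue);  // only dn-2's report remains
  }
}
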
+ 188 - 115
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java

@@ -21,6 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 import com.google.common.collect.Sets;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
@@ -38,6 +39,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.List;
 import java.util.Set;
 import java.util.Set;
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CopyOnWriteArrayList;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 
 /**
 /**
  * One instance per block-pool/namespace on the DN, which handles the
  * One instance per block-pool/namespace on the DN, which handles the
@@ -91,6 +94,28 @@ class BPOfferService {
    */
    */
   private long lastActiveClaimTxId = -1;
   private long lastActiveClaimTxId = -1;
 
 
+  private final ReentrantReadWriteLock mReadWriteLock =
+      new ReentrantReadWriteLock();
+  private final Lock mReadLock  = mReadWriteLock.readLock();
+  private final Lock mWriteLock = mReadWriteLock.writeLock();
+
+  // utility methods to acquire and release read lock and write lock
+  void readLock() {
+    mReadLock.lock();
+  }
+
+  void readUnlock() {
+    mReadLock.unlock();
+  }
+
+  void writeLock() {
+    mWriteLock.lock();
+  }
+
+  void writeUnlock() {
+    mWriteLock.unlock();
+  }
+
   BPOfferService(List<InetSocketAddress> nnAddrs, DataNode dn) {
   BPOfferService(List<InetSocketAddress> nnAddrs, DataNode dn) {
     Preconditions.checkArgument(!nnAddrs.isEmpty(),
     Preconditions.checkArgument(!nnAddrs.isEmpty(),
         "Must pass at least one NN.");
         "Must pass at least one NN.");
@@ -135,14 +160,19 @@ class BPOfferService {
     }
     }
     return false;
     return false;
   }
   }
-  
-  synchronized String getBlockPoolId() {
-    if (bpNSInfo != null) {
-      return bpNSInfo.getBlockPoolID();
-    } else {
-      LOG.warn("Block pool ID needed, but service not yet registered with NN",
-          new Exception("trace"));
-      return null;
+
+  String getBlockPoolId() {
+    readLock();
+    try {
+      if (bpNSInfo != null) {
+        return bpNSInfo.getBlockPoolID();
+      } else {
+        LOG.warn("Block pool ID needed, but service not yet registered with NN",
+            new Exception("trace"));
+        return null;
+      }
+    } finally {
+      readUnlock();
     }
     }
   }
   }
 
 
@@ -150,27 +180,37 @@ class BPOfferService {
     return getNamespaceInfo() != null;
     return getNamespaceInfo() != null;
   }
   }
 
 
-  synchronized NamespaceInfo getNamespaceInfo() {
-    return bpNSInfo;
+  NamespaceInfo getNamespaceInfo() {
+    readLock();
+    try {
+      return bpNSInfo;
+    } finally {
+      readUnlock();
+    }
   }
   }
 
 
   @Override
   @Override
-  public synchronized String toString() {
-    if (bpNSInfo == null) {
-      // If we haven't yet connected to our NN, we don't yet know our
-      // own block pool ID.
-      // If _none_ of the block pools have connected yet, we don't even
-      // know the DatanodeID ID of this DN.
-      String datanodeUuid = dn.getDatanodeUuid();
-
-      if (datanodeUuid == null || datanodeUuid.isEmpty()) {
-        datanodeUuid = "unassigned";
+  public String toString() {
+    readLock();
+    try {
+      if (bpNSInfo == null) {
+        // If we haven't yet connected to our NN, we don't yet know our
+        // own block pool ID.
+        // If _none_ of the block pools have connected yet, we don't even
+        // know the DatanodeID ID of this DN.
+        String datanodeUuid = dn.getDatanodeUuid();
+
+        if (datanodeUuid == null || datanodeUuid.isEmpty()) {
+          datanodeUuid = "unassigned";
+        }
+        return "Block pool <registering> (Datanode Uuid " + datanodeUuid + ")";
+      } else {
+        return "Block pool " + getBlockPoolId() +
+            " (Datanode Uuid " + dn.getDatanodeUuid() +
+            ")";
       }
       }
-      return "Block pool <registering> (Datanode Uuid " + datanodeUuid + ")";
-    } else {
-      return "Block pool " + getBlockPoolId() +
-          " (Datanode Uuid " + dn.getDatanodeUuid() +
-          ")";
+    } finally {
+      readUnlock();
     }
     }
   }
   }
   
   
@@ -266,32 +306,37 @@ class BPOfferService {
    * verifies that this namespace matches (eg to prevent a misconfiguration
    * verifies that this namespace matches (eg to prevent a misconfiguration
    * where a StandbyNode from a different cluster is specified)
    * where a StandbyNode from a different cluster is specified)
    */
    */
-  synchronized void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOException {
-    if (this.bpNSInfo == null) {
-      this.bpNSInfo = nsInfo;
-      boolean success = false;
-
-      // Now that we know the namespace ID, etc, we can pass this to the DN.
-      // The DN can now initialize its local storage if we are the
-      // first BP to handshake, etc.
-      try {
-        dn.initBlockPool(this);
-        success = true;
-      } finally {
-        if (!success) {
-          // The datanode failed to initialize the BP. We need to reset
-          // the namespace info so that other BPService actors still have
-          // a chance to set it, and re-initialize the datanode.
-          this.bpNSInfo = null;
+  void verifyAndSetNamespaceInfo(NamespaceInfo nsInfo) throws IOException {
+    writeLock();
+    try {
+      if (this.bpNSInfo == null) {
+        this.bpNSInfo = nsInfo;
+        boolean success = false;
+
+        // Now that we know the namespace ID, etc, we can pass this to the DN.
+        // The DN can now initialize its local storage if we are the
+        // first BP to handshake, etc.
+        try {
+          dn.initBlockPool(this);
+          success = true;
+        } finally {
+          if (!success) {
+            // The datanode failed to initialize the BP. We need to reset
+            // the namespace info so that other BPService actors still have
+            // a chance to set it, and re-initialize the datanode.
+            this.bpNSInfo = null;
+          }
         }
         }
+      } else {
+        checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(),
+            "Blockpool ID");
+        checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(),
+            "Namespace ID");
+        checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(),
+            "Cluster ID");
       }
       }
-    } else {
-      checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(),
-          "Blockpool ID");
-      checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(),
-          "Namespace ID");
-      checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(),
-          "Cluster ID");
+    } finally {
+      writeUnlock();
     }
     }
   }
   }
 
 
@@ -300,22 +345,27 @@ class BPOfferService {
    * NN, it calls this function to verify that the NN it connected to
    * NN, it calls this function to verify that the NN it connected to
    * is consistent with other NNs serving the block-pool.
    * is consistent with other NNs serving the block-pool.
    */
    */
-  synchronized void registrationSucceeded(BPServiceActor bpServiceActor,
+  void registrationSucceeded(BPServiceActor bpServiceActor,
       DatanodeRegistration reg) throws IOException {
       DatanodeRegistration reg) throws IOException {
-    if (bpRegistration != null) {
-      checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
-          reg.getStorageInfo().getNamespaceID(), "namespace ID");
-      checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
-          reg.getStorageInfo().getClusterID(), "cluster ID");
-    } else {
-      bpRegistration = reg;
-    }
-    
-    dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
-    // Add the initial block token secret keys to the DN's secret manager.
-    if (dn.isBlockTokenEnabled) {
-      dn.blockPoolTokenSecretManager.addKeys(getBlockPoolId(),
-          reg.getExportedKeys());
+    writeLock();
+    try {
+      if (bpRegistration != null) {
+        checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
+            reg.getStorageInfo().getNamespaceID(), "namespace ID");
+        checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
+            reg.getStorageInfo().getClusterID(), "cluster ID");
+      } else {
+        bpRegistration = reg;
+      }
+
+      dn.bpRegistrationSucceeded(bpRegistration, getBlockPoolId());
+      // Add the initial block token secret keys to the DN's secret manager.
+      if (dn.isBlockTokenEnabled) {
+        dn.blockPoolTokenSecretManager.addKeys(getBlockPoolId(),
+            reg.getExportedKeys());
+      }
+    } finally {
+      writeUnlock();
     }
     }
   }
   }
 
 
@@ -333,25 +383,35 @@ class BPOfferService {
     }
     }
   }
   }
 
 
-  synchronized DatanodeRegistration createRegistration() {
-    Preconditions.checkState(bpNSInfo != null,
-        "getRegistration() can only be called after initial handshake");
-    return dn.createBPRegistration(bpNSInfo);
+  DatanodeRegistration createRegistration() {
+    writeLock();
+    try {
+      Preconditions.checkState(bpNSInfo != null,
+          "getRegistration() can only be called after initial handshake");
+      return dn.createBPRegistration(bpNSInfo);
+    } finally {
+      writeUnlock();
+    }
   }
   }
 
 
   /**
   /**
    * Called when an actor shuts down. If this is the last actor
    * Called when an actor shuts down. If this is the last actor
    * to shut down, shuts down the whole blockpool in the DN.
    * to shut down, shuts down the whole blockpool in the DN.
    */
    */
-  synchronized void shutdownActor(BPServiceActor actor) {
-    if (bpServiceToActive == actor) {
-      bpServiceToActive = null;
-    }
+  void shutdownActor(BPServiceActor actor) {
+    writeLock();
+    try {
+      if (bpServiceToActive == actor) {
+        bpServiceToActive = null;
+      }
 
 
-    bpServices.remove(actor);
+      bpServices.remove(actor);
 
 
-    if (bpServices.isEmpty()) {
-      dn.shutdownBlockPool(this);
+      if (bpServices.isEmpty()) {
+        dn.shutdownBlockPool(this);
+      }
+    } finally {
+      writeUnlock();
     }
     }
   }
   }
 
 
@@ -392,11 +452,16 @@ class BPOfferService {
    * @return a proxy to the active NN, or null if the BPOS has not
    * @return a proxy to the active NN, or null if the BPOS has not
    * acknowledged any NN as active yet.
    * acknowledged any NN as active yet.
    */
    */
-  synchronized DatanodeProtocolClientSideTranslatorPB getActiveNN() {
-    if (bpServiceToActive != null) {
-      return bpServiceToActive.bpNamenode;
-    } else {
-      return null;
+  DatanodeProtocolClientSideTranslatorPB getActiveNN() {
+    readLock();
+    try {
+      if (bpServiceToActive != null) {
+        return bpServiceToActive.bpNamenode;
+      } else {
+        return null;
+      }
+    } finally {
+      readUnlock();
     }
     }
   }
   }
 
 
@@ -424,45 +489,50 @@ class BPOfferService {
    * @param actor the actor which received the heartbeat
    * @param actor the actor which received the heartbeat
    * @param nnHaState the HA-related heartbeat contents
    * @param nnHaState the HA-related heartbeat contents
    */
    */
-  synchronized void updateActorStatesFromHeartbeat(
+  void updateActorStatesFromHeartbeat(
       BPServiceActor actor,
       BPServiceActor actor,
       NNHAStatusHeartbeat nnHaState) {
       NNHAStatusHeartbeat nnHaState) {
-    final long txid = nnHaState.getTxId();
-    
-    final boolean nnClaimsActive =
-      nnHaState.getState() == HAServiceState.ACTIVE;
-    final boolean bposThinksActive = bpServiceToActive == actor;
-    final boolean isMoreRecentClaim = txid > lastActiveClaimTxId; 
-    
-    if (nnClaimsActive && !bposThinksActive) {
-      LOG.info("Namenode " + actor + " trying to claim ACTIVE state with " +
-          "txid=" + txid);
-      if (!isMoreRecentClaim) {
-        // Split-brain scenario - an NN is trying to claim active
-        // state when a different NN has already claimed it with a higher
-        // txid.
-        LOG.warn("NN " + actor + " tried to claim ACTIVE state at txid=" +
-            txid + " but there was already a more recent claim at txid=" +
-            lastActiveClaimTxId);
-        return;
-      } else {
-        if (bpServiceToActive == null) {
-          LOG.info("Acknowledging ACTIVE Namenode " + actor);
+    writeLock();
+    try {
+      final long txid = nnHaState.getTxId();
+
+      final boolean nnClaimsActive =
+          nnHaState.getState() == HAServiceState.ACTIVE;
+      final boolean bposThinksActive = bpServiceToActive == actor;
+      final boolean isMoreRecentClaim = txid > lastActiveClaimTxId;
+
+      if (nnClaimsActive && !bposThinksActive) {
+        LOG.info("Namenode " + actor + " trying to claim ACTIVE state with " +
+            "txid=" + txid);
+        if (!isMoreRecentClaim) {
+          // Split-brain scenario - an NN is trying to claim active
+          // state when a different NN has already claimed it with a higher
+          // txid.
+          LOG.warn("NN " + actor + " tried to claim ACTIVE state at txid=" +
+              txid + " but there was already a more recent claim at txid=" +
+              lastActiveClaimTxId);
+          return;
         } else {
         } else {
-          LOG.info("Namenode " + actor + " taking over ACTIVE state from " +
-              bpServiceToActive + " at higher txid=" + txid);
+          if (bpServiceToActive == null) {
+            LOG.info("Acknowledging ACTIVE Namenode " + actor);
+          } else {
+            LOG.info("Namenode " + actor + " taking over ACTIVE state from " +
+                bpServiceToActive + " at higher txid=" + txid);
+          }
+          bpServiceToActive = actor;
         }
         }
-        bpServiceToActive = actor;
+      } else if (!nnClaimsActive && bposThinksActive) {
+        LOG.info("Namenode " + actor + " relinquishing ACTIVE state with " +
+            "txid=" + nnHaState.getTxId());
+        bpServiceToActive = null;
       }
       }
-    } else if (!nnClaimsActive && bposThinksActive) {
-      LOG.info("Namenode " + actor + " relinquishing ACTIVE state with " +
-          "txid=" + nnHaState.getTxId());
-      bpServiceToActive = null;
-    }
-    
-    if (bpServiceToActive == actor) {
-      assert txid >= lastActiveClaimTxId;
-      lastActiveClaimTxId = txid;
+
+      if (bpServiceToActive == actor) {
+        assert txid >= lastActiveClaimTxId;
+        lastActiveClaimTxId = txid;
+      }
+    } finally {
+      writeUnlock();
     }
     }
   }
   }
 
 
@@ -533,12 +603,15 @@ class BPOfferService {
       actor.reRegister();
       actor.reRegister();
       return true;
       return true;
     }
     }
-    synchronized (this) {
+    writeLock();
+    try {
       if (actor == bpServiceToActive) {
       if (actor == bpServiceToActive) {
         return processCommandFromActive(cmd, actor);
         return processCommandFromActive(cmd, actor);
       } else {
       } else {
         return processCommandFromStandby(cmd, actor);
         return processCommandFromStandby(cmd, actor);
       }
       }
+    } finally {
+      writeUnlock();
     }
     }
   }
   }
 
 

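BPOfferService drops its coarse synchronized methods in favor of an explicit ReentrantReadWriteLock: getters such as getBlockPoolId(), getNamespaceInfo(), toString() and getActiveNN() take the read lock, while state changes (verifyAndSetNamespaceInfo, registrationSucceeded, createRegistration, shutdownActor, updateActorStatesFromHeartbeat, processCommandFromActor) take the write lock, always releasing in a finally block. A minimal, self-contained illustration of that pattern using a generic holder, not the BPOfferService code itself:

// Minimal sketch: coarse synchronized methods replaced by a ReentrantReadWriteLock
// so concurrent readers of rarely-changing state do not serialize behind each other.
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class ReadWriteStateSketch {
  private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
  private final Lock readLock = rwLock.readLock();
  private final Lock writeLock = rwLock.writeLock();

  private String state;   // stand-in for namespace info / active-NN pointer

  // Readers take the shared read lock and may proceed in parallel.
  String getState() {
    readLock.lock();
    try {
      return state;
    } finally {
      readLock.unlock();
    }
  }

  // Writers take the exclusive write lock; release always happens in finally.
  void setState(String newState) {
    writeLock.lock();
    try {
      state = newState;
    } finally {
      writeLock.unlock();
    }
  }
}
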
+ 11 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -152,7 +152,7 @@ public class BlockPoolSliceStorage extends Storage {
     // During startup some of them can upgrade or roll back
     // During startup some of them can upgrade or roll back
     // while others could be up-to-date for the regular startup.
     // while others could be up-to-date for the regular startup.
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      doTransition(getStorageDir(idx), nsInfo, startOpt);
+      doTransition(datanode, getStorageDir(idx), nsInfo, startOpt);
       assert getCTime() == nsInfo.getCTime() 
       assert getCTime() == nsInfo.getCTime() 
           : "Data-node and name-node CTimes must be the same.";
           : "Data-node and name-node CTimes must be the same.";
     }
     }
@@ -242,7 +242,7 @@ public class BlockPoolSliceStorage extends Storage {
    * @param startOpt startup option
    * @param startOpt startup option
    * @throws IOException
    * @throws IOException
    */
    */
-  private void doTransition(StorageDirectory sd,
+  private void doTransition(DataNode datanode, StorageDirectory sd,
       NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
       NamespaceInfo nsInfo, StartupOption startOpt) throws IOException {
     if (startOpt == StartupOption.ROLLBACK) {
     if (startOpt == StartupOption.ROLLBACK) {
       doRollback(sd, nsInfo); // rollback if applicable
       doRollback(sd, nsInfo); // rollback if applicable
@@ -275,7 +275,7 @@ public class BlockPoolSliceStorage extends Storage {
     }
     }
     if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION
     if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION
         || this.cTime < nsInfo.getCTime()) {
         || this.cTime < nsInfo.getCTime()) {
-      doUpgrade(sd, nsInfo); // upgrade
+      doUpgrade(datanode, sd, nsInfo); // upgrade
       return;
       return;
     }
     }
     // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
     // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
@@ -304,7 +304,8 @@ public class BlockPoolSliceStorage extends Storage {
    * @param nsInfo Namespace Info from the namenode
    * @param nsInfo Namespace Info from the namenode
    * @throws IOException on error
    * @throws IOException on error
    */
    */
-  void doUpgrade(StorageDirectory bpSd, NamespaceInfo nsInfo) throws IOException {
+  void doUpgrade(DataNode datanode, StorageDirectory bpSd, NamespaceInfo nsInfo)
+      throws IOException {
     // Upgrading is applicable only to release with federation or after
     // Upgrading is applicable only to release with federation or after
     if (!DataNodeLayoutVersion.supports(
     if (!DataNodeLayoutVersion.supports(
         LayoutVersion.Feature.FEDERATION, layoutVersion)) {
         LayoutVersion.Feature.FEDERATION, layoutVersion)) {
@@ -312,7 +313,7 @@ public class BlockPoolSliceStorage extends Storage {
     }
     }
     LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
     LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
         + ".\n   old LV = " + this.getLayoutVersion() + "; old CTime = "
         + ".\n   old LV = " + this.getLayoutVersion() + "; old CTime = "
-        + this.getCTime() + ".\n   new LV = " + nsInfo.getLayoutVersion()
+        + this.getCTime() + ".\n   new LV = " + HdfsConstants.DATANODE_LAYOUT_VERSION
         + "; new CTime = " + nsInfo.getCTime());
         + "; new CTime = " + nsInfo.getCTime());
     // get <SD>/previous directory
     // get <SD>/previous directory
     String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath());
     String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath());
@@ -340,7 +341,7 @@ public class BlockPoolSliceStorage extends Storage {
     rename(bpCurDir, bpTmpDir);
     rename(bpCurDir, bpTmpDir);
     
     
     // 3. Create new <SD>/current with block files hardlinks and VERSION
     // 3. Create new <SD>/current with block files hardlinks and VERSION
-    linkAllBlocks(bpTmpDir, bpCurDir);
+    linkAllBlocks(datanode, bpTmpDir, bpCurDir);
     this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
     this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
     assert this.namespaceID == nsInfo.getNamespaceID() 
     assert this.namespaceID == nsInfo.getNamespaceID() 
         : "Data-node and name-node layout versions must be the same.";
         : "Data-node and name-node layout versions must be the same.";
@@ -517,14 +518,15 @@ public class BlockPoolSliceStorage extends Storage {
    * @param toDir the current data directory
    * @param toDir the current data directory
    * @throws IOException if error occurs during hardlink
    * @throws IOException if error occurs during hardlink
    */
    */
-  private void linkAllBlocks(File fromDir, File toDir) throws IOException {
+  private void linkAllBlocks(DataNode datanode, File fromDir, File toDir)
+      throws IOException {
     // do the link
     // do the link
     int diskLayoutVersion = this.getLayoutVersion();
     int diskLayoutVersion = this.getLayoutVersion();
     // hardlink finalized blocks in tmpDir
     // hardlink finalized blocks in tmpDir
     HardLink hardLink = new HardLink();
     HardLink hardLink = new HardLink();
-    DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED), 
+    DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_FINALIZED),
       new File(toDir,DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
       new File(toDir,DataStorage.STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
-    DataStorage.linkBlocks(new File(fromDir, DataStorage.STORAGE_DIR_RBW), 
+    DataStorage.linkBlocks(datanode, new File(fromDir, DataStorage.STORAGE_DIR_RBW),
         new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
         new File(toDir, DataStorage.STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
     LOG.info( hardLink.linkStats.report() );
     LOG.info( hardLink.linkStats.report() );
   }
   }

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

@@ -253,7 +253,7 @@ class BlockReceiver implements Closeable {
       
       
      if (cause != null) { // possible disk error
        ioe = cause;
-        datanode.checkDiskError();
+        datanode.checkDiskErrorAsync();
      }
      
      throw ioe;
@@ -329,7 +329,7 @@ class BlockReceiver implements Closeable {
    }
    // disk check
    if(ioe != null) {
-      datanode.checkDiskError();
+      datanode.checkDiskErrorAsync();
      throw ioe;
    }
  }
@@ -639,7 +639,7 @@ class BlockReceiver implements Closeable {
          manageWriterOsCache(offsetInBlock);
        }
      } catch (IOException iex) {
-        datanode.checkDiskError();
+        datanode.checkDiskErrorAsync();
        throw iex;
      }
    }
@@ -1208,7 +1208,7 @@ class BlockReceiver implements Closeable {
        } catch (IOException e) {
          LOG.warn("IOException in BlockReceiver.run(): ", e);
          if (running) {
-            datanode.checkDiskError();
+            datanode.checkDiskErrorAsync();
            LOG.info(myString, e);
            running = false;
            if (!Thread.interrupted()) { // failure not caused by interruption

+ 21 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -1075,6 +1075,11 @@ public class DataNode extends Configured
    // In the case that this is the first block pool to connect, initialize
    // the dataset, block scanners, etc.
    initStorage(nsInfo);
+
+    // Exclude failed disks before initializing the block pools to avoid startup
+    // failures.
+    checkDiskError();
+
    initPeriodicScanners(conf);
    
    data.addBlockPool(nsInfo.getBlockPoolID(), conf);
@@ -1510,9 +1515,9 @@ public class DataNode extends Configured
   
   
   
   
  /**
-   *  Check if there is a disk failure and if so, handle the error
+   * Check if there is a disk failure asynchronously and if so, handle the error
   */
-  public void checkDiskError() {
+  public void checkDiskErrorAsync() {
    synchronized(checkDiskErrorMutex) {
      checkDiskErrorFlag = true;
      if(checkDiskErrorThread == null) {
@@ -1821,7 +1826,7 @@ public class DataNode extends Configured
        LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
            targets[0] + " got ", ie);
        // check if there are any disk problem
-        checkDiskError();
+        checkDiskErrorAsync();
      } finally {
        xmitsInProgress.getAndDecrement();
        IOUtils.closeStream(blockSender);
@@ -2759,7 +2764,18 @@ public class DataNode extends Configured
  public ShortCircuitRegistry getShortCircuitRegistry() {
    return shortCircuitRegistry;
  }
-  
+
+  /**
+   * Check the disk error
+   */
+  private void checkDiskError() {
+    try {
+      data.checkDataDir();
+    } catch (DiskErrorException de) {
+      handleDiskError(de.getMessage());
+    }
+  }
+
  /**
   * Starts a new thread which will check for disk error check request 
   * every 5 sec
@@ -2776,9 +2792,7 @@ public class DataNode extends Configured
              }
              if(tempFlag) {
                try {
-                  data.checkDataDir();
-                } catch (DiskErrorException de) {
-                  handleDiskError(de.getMessage());
+                  checkDiskError();
                } catch (Exception e) {
                  LOG.warn("Unexpected exception occurred while checking disk error  " + e);
                  checkDiskErrorThread = null;

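The DataNode changes split disk checking into two paths: a synchronous checkDiskError() that scans the data directories directly (now also invoked while initializing a block pool, so failed volumes are excluded up front), and checkDiskErrorAsync(), which only sets a flag under checkDiskErrorMutex and lets the background checker thread do the scan, so the block receive and transfer paths never block on it. A rough sketch of that flag-plus-checker-thread shape, with illustrative names rather than the real DataNode fields:

// Hedged sketch, not the DataNode implementation: a synchronous check used at
// startup, plus an asynchronous request serviced by a background thread.
class DiskCheckSketch {
  private final Object mutex = new Object();
  private boolean checkRequested = false;

  // Synchronous: the caller waits for the scan (used during initialization).
  void checkDiskErrorNow() {
    scanDataDirs();
  }

  // Asynchronous: just record the request; the checker thread does the work.
  void checkDiskErrorAsync() {
    synchronized (mutex) {
      checkRequested = true;
      mutex.notifyAll();
    }
  }

  void startChecker() {
    Thread t = new Thread(() -> {
      while (true) {
        synchronized (mutex) {
          while (!checkRequested) {
            try {
              mutex.wait(5000);   // poll roughly every 5 seconds, as in the code above
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
              return;
            }
          }
          checkRequested = false;
        }
        scanDataDirs();           // the scan itself runs outside the lock
      }
    }, "disk-checker");
    t.setDaemon(true);
    t.start();
  }

  private void scanDataDirs() {
    System.out.println("scanning data directories for failures");
  }
}
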
+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNodeLayoutVersion.java

@@ -62,7 +62,10 @@ public class DataNodeLayoutVersion {
   * </ul>
   */
  public static enum Feature implements LayoutFeature {
-    FIRST_LAYOUT(-55, -53, "First datanode layout", false);
+    FIRST_LAYOUT(-55, -53, "First datanode layout", false),
+    BLOCKID_BASED_LAYOUT(-56,
+        "The block ID of a finalized block uniquely determines its position " +
+            "in the directory structure");
   
    private final FeatureInfo info;


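Datanode layout versions are negative and grow more negative as features are added, so the new BLOCKID_BASED_LAYOUT entry at -56 is newer than FIRST_LAYOUT at -55. Elsewhere in this change, DataStorage.linkBlocks() decides whether to convert the on-disk layout by comparing the recorded version against this feature's version. A tiny standalone sketch of that comparison, using the constants from the enum above:

// Illustrative only: "older than feature X" means "numerically greater than X's version".
class LayoutVersionSketch {
  static final int FIRST_LAYOUT = -55;          // from the enum above
  static final int BLOCKID_BASED_LAYOUT = -56;  // from the enum above

  static boolean needsBlockIdLayoutUpgrade(int onDiskLayoutVersion) {
    return onDiskLayoutVersion > BLOCKID_BASED_LAYOUT;
  }

  public static void main(String[] args) {
    System.out.println(needsBlockIdLayoutUpgrade(FIRST_LAYOUT));          // true
    System.out.println(needsBlockIdLayoutUpgrade(BLOCKID_BASED_LAYOUT));  // false
  }
}
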
+ 122 - 22
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -18,13 +18,19 @@
 
 
 package org.apache.hadoop.hdfs.server.datanode;
 package org.apache.hadoop.hdfs.server.datanode;
 
 
+import com.google.common.collect.Lists;
+import com.google.common.util.concurrent.Futures;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.HardLink;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
@@ -35,13 +41,30 @@ import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker;
 
 
-import java.io.*;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.RandomAccessFile;
 import java.nio.channels.FileLock;
 import java.nio.channels.FileLock;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.concurrent.Callable;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 
 
 /** 
 /** 
  * Data storage information file.
  * Data storage information file.
@@ -261,6 +284,7 @@ public class DataStorage extends Storage {
           STORAGE_DIR_CURRENT));
           STORAGE_DIR_CURRENT));
       bpDataDirs.add(bpRoot);
       bpDataDirs.add(bpRoot);
     }
     }
+
     // mkdir for the list of BlockPoolStorage
     // mkdir for the list of BlockPoolStorage
     makeBlockPoolDataDir(bpDataDirs, null);
     makeBlockPoolDataDir(bpDataDirs, null);
     BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(
     BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(
@@ -488,7 +512,7 @@ public class DataStorage extends Storage {
     
     
     // do upgrade
     // do upgrade
     if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) {
     if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) {
-      doUpgrade(sd, nsInfo);  // upgrade
+      doUpgrade(datanode, sd, nsInfo);  // upgrade
       return;
       return;
     }
     }
     
     
@@ -523,7 +547,8 @@ public class DataStorage extends Storage {
    * @param sd  storage directory
    * @param sd  storage directory
    * @throws IOException on error
    * @throws IOException on error
    */
    */
-  void doUpgrade(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
+  void doUpgrade(DataNode datanode, StorageDirectory sd, NamespaceInfo nsInfo)
+      throws IOException {
    // If the existing on-disk layout version supports federation, simply
    // update its layout version.
    if (DataNodeLayoutVersion.supports(
@@ -568,7 +593,8 @@ public class DataStorage extends Storage {
     BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(), 
     BlockPoolSliceStorage bpStorage = new BlockPoolSliceStorage(nsInfo.getNamespaceID(), 
         nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID());
         nsInfo.getBlockPoolID(), nsInfo.getCTime(), nsInfo.getClusterID());
     bpStorage.format(curDir, nsInfo);
     bpStorage.format(curDir, nsInfo);
-    linkAllBlocks(tmpDir, bbwDir, new File(curBpDir, STORAGE_DIR_CURRENT));
+    linkAllBlocks(datanode, tmpDir, bbwDir, new File(curBpDir,
+        STORAGE_DIR_CURRENT));
     
     
     // 4. Write version file under <SD>/current
     // 4. Write version file under <SD>/current
     layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
     layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
@@ -746,22 +772,22 @@ public class DataStorage extends Storage {
    *
    *
    * @throws IOException If error occurs during hardlink
    * @throws IOException If error occurs during hardlink
    */
    */
-  private void linkAllBlocks(File fromDir, File fromBbwDir, File toDir)
-      throws IOException {
+  private void linkAllBlocks(DataNode datanode, File fromDir, File fromBbwDir,
+      File toDir) throws IOException {
     HardLink hardLink = new HardLink();
     HardLink hardLink = new HardLink();
     // do the link
     // do the link
     int diskLayoutVersion = this.getLayoutVersion();
     int diskLayoutVersion = this.getLayoutVersion();
     if (DataNodeLayoutVersion.supports(
     if (DataNodeLayoutVersion.supports(
         LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
         LayoutVersion.Feature.APPEND_RBW_DIR, diskLayoutVersion)) {
       // hardlink finalized blocks in tmpDir/finalized
       // hardlink finalized blocks in tmpDir/finalized
-      linkBlocks(new File(fromDir, STORAGE_DIR_FINALIZED), 
+      linkBlocks(datanode, new File(fromDir, STORAGE_DIR_FINALIZED),
           new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
           new File(toDir, STORAGE_DIR_FINALIZED), diskLayoutVersion, hardLink);
       // hardlink rbw blocks in tmpDir/rbw
       // hardlink rbw blocks in tmpDir/rbw
-      linkBlocks(new File(fromDir, STORAGE_DIR_RBW), 
+      linkBlocks(datanode, new File(fromDir, STORAGE_DIR_RBW),
           new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
           new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
     } else { // pre-RBW version
     } else { // pre-RBW version
       // hardlink finalized blocks in tmpDir
       // hardlink finalized blocks in tmpDir
-      linkBlocks(fromDir, new File(toDir, STORAGE_DIR_FINALIZED), 
+      linkBlocks(datanode, fromDir, new File(toDir, STORAGE_DIR_FINALIZED),
           diskLayoutVersion, hardLink);      
           diskLayoutVersion, hardLink);      
       if (fromBbwDir.exists()) {
       if (fromBbwDir.exists()) {
         /*
         /*
@@ -770,15 +796,67 @@ public class DataStorage extends Storage {
          * NOT underneath the 'current' directory in those releases.  See
          * NOT underneath the 'current' directory in those releases.  See
          * HDFS-3731 for details.
          * HDFS-3731 for details.
          */
          */
-        linkBlocks(fromBbwDir,
+        linkBlocks(datanode, fromBbwDir,
             new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
             new File(toDir, STORAGE_DIR_RBW), diskLayoutVersion, hardLink);
       }
       }
     } 
     } 
     LOG.info( hardLink.linkStats.report() );
     LOG.info( hardLink.linkStats.report() );
   }
   }
+
+  private static class LinkArgs {
+    public File src;
+    public File dst;
+
+    public LinkArgs(File src, File dst) {
+      this.src = src;
+      this.dst = dst;
+    }
+  }
+
+  static void linkBlocks(DataNode datanode, File from, File to, int oldLV,
+      HardLink hl) throws IOException {
+    boolean upgradeToIdBasedLayout = false;
+    // If we are upgrading from a version older than the one where we introduced
+    // block ID-based layout AND we're working with the finalized directory,
+    // we'll need to upgrade from the old flat layout to the block ID-based one
+    if (oldLV > DataNodeLayoutVersion.Feature.BLOCKID_BASED_LAYOUT.getInfo().
+        getLayoutVersion() && to.getName().equals(STORAGE_DIR_FINALIZED)) {
+      upgradeToIdBasedLayout = true;
+    }
+
+    final List<LinkArgs> idBasedLayoutSingleLinks = Lists.newArrayList();
+    linkBlocksHelper(from, to, oldLV, hl, upgradeToIdBasedLayout, to,
+        idBasedLayoutSingleLinks);
+    int numLinkWorkers = datanode.getConf().getInt(
+        DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY,
+        DFSConfigKeys.DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS);
+    ExecutorService linkWorkers = Executors.newFixedThreadPool(numLinkWorkers);
+    final int step = idBasedLayoutSingleLinks.size() / numLinkWorkers + 1;
+    List<Future<Void>> futures = Lists.newArrayList();
+    for (int i = 0; i < idBasedLayoutSingleLinks.size(); i += step) {
+      final int iCopy = i;
+      futures.add(linkWorkers.submit(new Callable<Void>() {
+        @Override
+        public Void call() throws IOException {
+          int upperBound = Math.min(iCopy + step,
+              idBasedLayoutSingleLinks.size());
+          for (int j = iCopy; j < upperBound; j++) {
+            LinkArgs cur = idBasedLayoutSingleLinks.get(j);
+            NativeIO.link(cur.src, cur.dst);
+          }
+          return null;
+        }
+      }));
+    }
+    linkWorkers.shutdown();
+    for (Future<Void> f : futures) {
+      Futures.get(f, IOException.class);
+    }
+  }
   
   
-  static void linkBlocks(File from, File to, int oldLV, HardLink hl) 
-  throws IOException {
+  static void linkBlocksHelper(File from, File to, int oldLV, HardLink hl,
+  boolean upgradeToIdBasedLayout, File blockRoot,
+      List<LinkArgs> idBasedLayoutSingleLinks) throws IOException {
     if (!from.exists()) {
     if (!from.exists()) {
       return;
       return;
     }
     }
@@ -805,9 +883,6 @@ public class DataStorage extends Storage {
     // from is a directory
     // from is a directory
     hl.linkStats.countDirs++;
     hl.linkStats.countDirs++;
     
     
-    if (!to.mkdirs())
-      throw new IOException("Cannot create directory " + to);
-    
     String[] blockNames = from.list(new java.io.FilenameFilter() {
     String[] blockNames = from.list(new java.io.FilenameFilter() {
       @Override
       @Override
       public boolean accept(File dir, String name) {
       public boolean accept(File dir, String name) {
@@ -815,12 +890,36 @@ public class DataStorage extends Storage {
       }
       }
     });
     });
 
 
+    // If we are upgrading to block ID-based layout, we don't want to recreate
+    // any subdirs from the source that contain blocks, since we have a new
+    // directory structure
+    if (!upgradeToIdBasedLayout || !to.getName().startsWith(
+        BLOCK_SUBDIR_PREFIX)) {
+      if (!to.mkdirs())
+        throw new IOException("Cannot create directory " + to);
+    }
+
     // Block files just need hard links with the same file names
     // Block files just need hard links with the same file names
     // but a different directory
     // but a different directory
     if (blockNames.length > 0) {
     if (blockNames.length > 0) {
-      HardLink.createHardLinkMult(from, blockNames, to);
-      hl.linkStats.countMultLinks++;
-      hl.linkStats.countFilesMultLinks += blockNames.length;
+      if (upgradeToIdBasedLayout) {
+        for (String blockName : blockNames) {
+          long blockId = Block.getBlockId(blockName);
+          File blockLocation = DatanodeUtil.idToBlockDir(blockRoot, blockId);
+          if (!blockLocation.exists()) {
+            if (!blockLocation.mkdirs()) {
+              throw new IOException("Failed to mkdirs " + blockLocation);
+            }
+          }
+          idBasedLayoutSingleLinks.add(new LinkArgs(new File(from, blockName),
+              new File(blockLocation, blockName)));
+          hl.linkStats.countSingleLinks++;
+        }
+      } else {
+        HardLink.createHardLinkMult(from, blockNames, to);
+        hl.linkStats.countMultLinks++;
+        hl.linkStats.countFilesMultLinks += blockNames.length;
+      }
     } else {
     } else {
       hl.linkStats.countEmptyDirs++;
       hl.linkStats.countEmptyDirs++;
     }
     }
@@ -834,8 +933,9 @@ public class DataStorage extends Storage {
         }
         }
       });
       });
     for(int i = 0; i < otherNames.length; i++)
     for(int i = 0; i < otherNames.length; i++)
-      linkBlocks(new File(from, otherNames[i]), 
-          new File(to, otherNames[i]), oldLV, hl);
+      linkBlocksHelper(new File(from, otherNames[i]),
+          new File(to, otherNames[i]), oldLV, hl, upgradeToIdBasedLayout,
+          blockRoot, idBasedLayoutSingleLinks);
   }
   }
 
 
   /**
   /**

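The reworked linkBlocks() first collects every (src, dst) hard-link pair for the block ID-based layout into idBasedLayoutSingleLinks, then splits that flat list into contiguous chunks and hands each chunk to a fixed-size executor (sized by the DFS_DATANODE_BLOCK_ID_LAYOUT_UPGRADE_THREADS_KEY setting), calling NativeIO.link() per pair and joining all futures before returning. A self-contained sketch of the same fan-out pattern, with a println standing in for the native hard-link call:

// Sketch of the chunked fan-out used above: split a flat work list across a
// fixed thread pool, then join every future so worker failures surface.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class ChunkedWorkSketch {
  static void runChunked(List<String> work, int workers) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(workers);
    int step = work.size() / workers + 1;            // same chunking rule as above
    List<Future<Void>> futures = new ArrayList<>();
    for (int i = 0; i < work.size(); i += step) {
      final int start = i;
      futures.add(pool.submit((Callable<Void>) () -> {
        int end = Math.min(start + step, work.size());
        for (int j = start; j < end; j++) {
          // NativeIO.link(src, dst) in the real code
          System.out.println("link " + work.get(j));
        }
        return null;
      }));
    }
    pool.shutdown();
    for (Future<Void> f : futures) {
      try {
        f.get();                                     // rethrow any worker failure
      } catch (ExecutionException e) {
        throw new Exception(e.getCause());
      }
    }
  }

  public static void main(String[] args) throws Exception {
    runChunked(List.of("a", "b", "c", "d", "e"), 2);
  }
}
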
+ 36 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java

@@ -30,6 +30,8 @@ public class DatanodeUtil {
 
 
  public static final String DISK_ERROR = "Possible disk error: ";

+  private static final String SEP = System.getProperty("file.separator");
+
  /** Get the cause of an I/O exception if caused by a possible disk error
   * @param ioe an I/O exception
   * @return cause if the I/O exception is caused by a possible disk error;
@@ -78,4 +80,38 @@ public class DatanodeUtil {
  public static File getUnlinkTmpFile(File f) {
    return new File(f.getParentFile(), f.getName()+UNLINK_BLOCK_SUFFIX);
  }
+
+  /**
+   * Checks whether there are any files anywhere in the directory tree rooted
+   * at dir (directories don't count as files). dir must exist
+   * @return true if there are no files
+   * @throws IOException if unable to list subdirectories
+   */
+  public static boolean dirNoFilesRecursive(File dir) throws IOException {
+    File[] contents = dir.listFiles();
+    if (contents == null) {
+      throw new IOException("Cannot list contents of " + dir);
+    }
+    for (File f : contents) {
+      if (!f.isDirectory() || (f.isDirectory() && !dirNoFilesRecursive(f))) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   * Get the directory where a finalized block with this ID should be stored.
+   * Do not attempt to create the directory.
+   * @param root the root directory where finalized blocks are stored
+   * @param blockId the ID of the finalized block
+   * @return the two-level directory, derived from the block ID, under root
+   */
+  public static File idToBlockDir(File root, long blockId) {
+    int d1 = (int)((blockId >> 16) & 0xff);
+    int d2 = (int)((blockId >> 8) & 0xff);
+    String path = DataStorage.BLOCK_SUBDIR_PREFIX + d1 + SEP +
+        DataStorage.BLOCK_SUBDIR_PREFIX + d2;
+    return new File(root, path);
+  }
 }
 }

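To make the idToBlockDir() arithmetic concrete, a small hypothetical driver (it assumes BLOCK_SUBDIR_PREFIX is "subdir", as elsewhere in this patch):

public class IdToBlockDirExample {
  public static void main(String[] args) {
    // Block ID 0x00ABCDEF carries 0xAB (171) in bits 16-23 and 0xCD (205) in bits 8-15,
    // so its finalized copy lives under subdir171/subdir205.
    long blockId = 0x00ABCDEFL;
    int d1 = (int) ((blockId >> 16) & 0xff);  // 171
    int d2 = (int) ((blockId >> 8) & 0xff);   // 205
    System.out.println("subdir" + d1 + "/" + "subdir" + d2);  // subdir171/subdir205
  }
}

Because each level is a single byte, a block pool's finalized tree is bounded at 256 x 256 leaf directories, which is why the old maxBlocksPerDir setting and the LDir tree removed later in this change are no longer needed.
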
+ 22 - 40
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java

@@ -54,10 +54,10 @@ abstract public class ReplicaInfo extends Block implements Replica {
   private File baseDir;
   private File baseDir;
   
   
   /**
   /**
-   * Ints representing the sub directory path from base dir to the directory
-   * containing this replica.
+   * Whether or not this replica's parent directory includes subdirs, in which
+   * case we can generate them based on the replica's block ID
    */
    */
-  private int[] subDirs;
+  private boolean hasSubdirs;
   
   
   private static final Map<String, File> internedBaseDirs = new HashMap<String, File>();
   private static final Map<String, File> internedBaseDirs = new HashMap<String, File>();
 
 
@@ -151,18 +151,8 @@ abstract public class ReplicaInfo extends Block implements Replica {
    * @return the parent directory path where this replica is located
    * @return the parent directory path where this replica is located
    */
    */
   File getDir() {
   File getDir() {
-    if (subDirs == null) {
-      return null;
-    }
-
-    StringBuilder sb = new StringBuilder();
-    for (int i : subDirs) {
-      sb.append(DataStorage.BLOCK_SUBDIR_PREFIX);
-      sb.append(i);
-      sb.append("/");
-    }
-    File ret = new File(baseDir, sb.toString());
-    return ret;
+    return hasSubdirs ? DatanodeUtil.idToBlockDir(baseDir,
+        getBlockId()) : baseDir;
   }
   }
 
 
   /**
   /**
@@ -175,54 +165,46 @@ abstract public class ReplicaInfo extends Block implements Replica {
 
 
   private void setDirInternal(File dir) {
   private void setDirInternal(File dir) {
     if (dir == null) {
     if (dir == null) {
-      subDirs = null;
       baseDir = null;
       baseDir = null;
       return;
       return;
     }
     }
 
 
-    ReplicaDirInfo replicaDirInfo = parseSubDirs(dir);
-    this.subDirs = replicaDirInfo.subDirs;
+    ReplicaDirInfo dirInfo = parseBaseDir(dir);
+    this.hasSubdirs = dirInfo.hasSubdirs;
     
     
     synchronized (internedBaseDirs) {
     synchronized (internedBaseDirs) {
-      if (!internedBaseDirs.containsKey(replicaDirInfo.baseDirPath)) {
+      if (!internedBaseDirs.containsKey(dirInfo.baseDirPath)) {
         // Create a new String path of this file and make a brand new File object
         // Create a new String path of this file and make a brand new File object
         // to guarantee we drop the reference to the underlying char[] storage.
         // to guarantee we drop the reference to the underlying char[] storage.
-        File baseDir = new File(replicaDirInfo.baseDirPath);
-        internedBaseDirs.put(replicaDirInfo.baseDirPath, baseDir);
+        File baseDir = new File(dirInfo.baseDirPath);
+        internedBaseDirs.put(dirInfo.baseDirPath, baseDir);
       }
       }
-      this.baseDir = internedBaseDirs.get(replicaDirInfo.baseDirPath);
+      this.baseDir = internedBaseDirs.get(dirInfo.baseDirPath);
     }
     }
   }
   }
-  
+
   @VisibleForTesting
   @VisibleForTesting
   public static class ReplicaDirInfo {
   public static class ReplicaDirInfo {
-    @VisibleForTesting
     public String baseDirPath;
     public String baseDirPath;
-    
-    @VisibleForTesting
-    public int[] subDirs;
+    public boolean hasSubdirs;
+
+    public ReplicaDirInfo(String baseDirPath, boolean hasSubdirs) {
+      this.baseDirPath = baseDirPath;
+      this.hasSubdirs = hasSubdirs;
+    }
   }
   }
   
   
   @VisibleForTesting
   @VisibleForTesting
-  public static ReplicaDirInfo parseSubDirs(File dir) {
-    ReplicaDirInfo ret = new ReplicaDirInfo();
+  public static ReplicaDirInfo parseBaseDir(File dir) {
     
     
     File currentDir = dir;
     File currentDir = dir;
-    List<Integer> subDirList = new ArrayList<Integer>();
+    boolean hasSubdirs = false;
     while (currentDir.getName().startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)) {
     while (currentDir.getName().startsWith(DataStorage.BLOCK_SUBDIR_PREFIX)) {
-      // Prepend the integer into the list.
-      subDirList.add(0, Integer.parseInt(currentDir.getName().replaceFirst(
-          DataStorage.BLOCK_SUBDIR_PREFIX, "")));
+      hasSubdirs = true;
       currentDir = currentDir.getParentFile();
       currentDir = currentDir.getParentFile();
     }
     }
-    ret.subDirs = new int[subDirList.size()];
-    for (int i = 0; i < subDirList.size(); i++) {
-      ret.subDirs[i] = subDirList.get(i);
-    }
-    
-    ret.baseDirPath = currentDir.getAbsolutePath();
     
     
-    return ret;
+    return new ReplicaDirInfo(currentDir.getAbsolutePath(), hasSubdirs);
   }
   }
 
 
   /**
   /**

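A rough standalone equivalent of the parseBaseDir() walk above, with made-up names and only java.io.File: for a replica directory such as .../finalized/subdir171/subdir205 it reports the finalized directory as the base and hasSubdirs=true.

import java.io.File;

public class ParseBaseDirSketch {
  private static final String PREFIX = "subdir";  // DataStorage.BLOCK_SUBDIR_PREFIX in the patch

  static class DirInfo {
    final String baseDirPath;
    final boolean hasSubdirs;
    DirInfo(String baseDirPath, boolean hasSubdirs) {
      this.baseDirPath = baseDirPath;
      this.hasSubdirs = hasSubdirs;
    }
  }

  /** Walk upwards while the directory name looks like an ID-layout subdir. */
  static DirInfo parse(File dir) {
    File current = dir;
    boolean hasSubdirs = false;
    while (current.getName().startsWith(PREFIX)) {
      hasSubdirs = true;
      current = current.getParentFile();
    }
    return new DirInfo(current.getAbsolutePath(), hasSubdirs);
  }

  public static void main(String[] args) {
    DirInfo info = parse(new File("/data/current/finalized/subdir171/subdir205"));
    System.out.println(info.baseDirPath + " hasSubdirs=" + info.hasSubdirs);
  }
}

The replica now stores one boolean plus an interned base-directory File instead of an int[] path, and getDir() recomputes the subdirectories from the block ID on demand.
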
+ 68 - 28
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/BlockPoolSlice.java

@@ -59,7 +59,8 @@ class BlockPoolSlice {
   private final String bpid;
   private final String bpid;
   private final FsVolumeImpl volume; // volume to which this BlockPool belongs to
   private final FsVolumeImpl volume; // volume to which this BlockPool belongs to
   private final File currentDir; // StorageDirectory/current/bpid/current
   private final File currentDir; // StorageDirectory/current/bpid/current
-  private final LDir finalizedDir; // directory store Finalized replica
+  // directory where finalized replicas are stored
+  private final File finalizedDir;
   private final File rbwDir; // directory store RBW replica
   private final File rbwDir; // directory store RBW replica
   private final File tmpDir; // directory store Temporary replica
   private final File tmpDir; // directory store Temporary replica
   private static final String DU_CACHE_FILE = "dfsUsed";
   private static final String DU_CACHE_FILE = "dfsUsed";
@@ -82,8 +83,13 @@ class BlockPoolSlice {
     this.bpid = bpid;
     this.bpid = bpid;
     this.volume = volume;
     this.volume = volume;
     this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
     this.currentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT); 
-    final File finalizedDir = new File(
+    this.finalizedDir = new File(
         currentDir, DataStorage.STORAGE_DIR_FINALIZED);
         currentDir, DataStorage.STORAGE_DIR_FINALIZED);
+    if (!this.finalizedDir.exists()) {
+      if (!this.finalizedDir.mkdirs()) {
+        throw new IOException("Failed to mkdirs " + this.finalizedDir);
+      }
+    }
 
 
     // Files that were being written when the datanode was last shutdown
     // Files that were being written when the datanode was last shutdown
     // are now moved back to the data directory. It is possible that
     // are now moved back to the data directory. It is possible that
@@ -95,10 +101,6 @@ class BlockPoolSlice {
       FileUtil.fullyDelete(tmpDir);
       FileUtil.fullyDelete(tmpDir);
     }
     }
     this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
     this.rbwDir = new File(currentDir, DataStorage.STORAGE_DIR_RBW);
-    final int maxBlocksPerDir = conf.getInt(
-        DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_KEY,
-        DFSConfigKeys.DFS_DATANODE_NUMBLOCKS_DEFAULT);
-    this.finalizedDir = new LDir(finalizedDir, maxBlocksPerDir);
     if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
     if (!rbwDir.mkdirs()) {  // create rbw directory if not exist
       if (!rbwDir.isDirectory()) {
       if (!rbwDir.isDirectory()) {
         throw new IOException("Mkdirs failed to create " + rbwDir.toString());
         throw new IOException("Mkdirs failed to create " + rbwDir.toString());
@@ -131,7 +133,7 @@ class BlockPoolSlice {
   }
   }
 
 
   File getFinalizedDir() {
   File getFinalizedDir() {
-    return finalizedDir.dir;
+    return finalizedDir;
   }
   }
   
   
   File getRbwDir() {
   File getRbwDir() {
@@ -239,25 +241,56 @@ class BlockPoolSlice {
   }
   }
 
 
   File addBlock(Block b, File f) throws IOException {
   File addBlock(Block b, File f) throws IOException {
-    File blockFile = finalizedDir.addBlock(b, f);
+    File blockDir = DatanodeUtil.idToBlockDir(finalizedDir, b.getBlockId());
+    if (!blockDir.exists()) {
+      if (!blockDir.mkdirs()) {
+        throw new IOException("Failed to mkdirs " + blockDir);
+      }
+    }
+    File blockFile = FsDatasetImpl.moveBlockFiles(b, f, blockDir);
     File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
     File metaFile = FsDatasetUtil.getMetaFile(blockFile, b.getGenerationStamp());
     dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
     dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
     return blockFile;
     return blockFile;
   }
   }
     
     
   void checkDirs() throws DiskErrorException {
   void checkDirs() throws DiskErrorException {
-    finalizedDir.checkDirTree();
+    DiskChecker.checkDirs(finalizedDir);
     DiskChecker.checkDir(tmpDir);
     DiskChecker.checkDir(tmpDir);
     DiskChecker.checkDir(rbwDir);
     DiskChecker.checkDir(rbwDir);
   }
   }
     
     
   void getVolumeMap(ReplicaMap volumeMap) throws IOException {
   void getVolumeMap(ReplicaMap volumeMap) throws IOException {
     // add finalized replicas
     // add finalized replicas
-    finalizedDir.getVolumeMap(bpid, volumeMap, volume);
+    addToReplicasMap(volumeMap, finalizedDir, true);
     // add rbw replicas
     // add rbw replicas
     addToReplicasMap(volumeMap, rbwDir, false);
     addToReplicasMap(volumeMap, rbwDir, false);
   }
   }
 
 
+  /**
+   * Recover an unlinked tmp file on datanode restart. If the original block
+   * does not exist, then the tmp file is renamed to be the
+   * original file name and the original name is returned; otherwise the tmp
+   * file is deleted and null is returned.
+   */
+  File recoverTempUnlinkedBlock(File unlinkedTmp) throws IOException {
+    File blockFile = FsDatasetUtil.getOrigFile(unlinkedTmp);
+    if (blockFile.exists()) {
+      // If the original block file still exists, then no recovery is needed.
+      if (!unlinkedTmp.delete()) {
+        throw new IOException("Unable to cleanup unlinked tmp file " +
+            unlinkedTmp);
+      }
+      return null;
+    } else {
+      if (!unlinkedTmp.renameTo(blockFile)) {
+        throw new IOException("Unable to rename unlinked tmp file " +
+            unlinkedTmp);
+      }
+      return blockFile;
+    }
+  }
+
+
   /**
   /**
    * Add replicas under the given directory to the volume map
    * Add replicas under the given directory to the volume map
    * @param volumeMap the replicas map
    * @param volumeMap the replicas map
@@ -267,23 +300,34 @@ class BlockPoolSlice {
    */
    */
   void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized
   void addToReplicasMap(ReplicaMap volumeMap, File dir, boolean isFinalized
       ) throws IOException {
       ) throws IOException {
-    File blockFiles[] = FileUtil.listFiles(dir);
-    for (File blockFile : blockFiles) {
-      if (!Block.isBlockFilename(blockFile))
+    File files[] = FileUtil.listFiles(dir);
+    for (File file : files) {
+      if (file.isDirectory()) {
+        addToReplicasMap(volumeMap, file, isFinalized);
+      }
+
+      if (isFinalized && FsDatasetUtil.isUnlinkTmpFile(file)) {
+        file = recoverTempUnlinkedBlock(file);
+        if (file == null) { // the original block still exists, so we cover it
+          // in another iteration and can continue here
+          continue;
+        }
+      }
+      if (!Block.isBlockFilename(file))
         continue;
         continue;
       
       
       long genStamp = FsDatasetUtil.getGenerationStampFromFile(
       long genStamp = FsDatasetUtil.getGenerationStampFromFile(
-          blockFiles, blockFile);
-      long blockId = Block.filename2id(blockFile.getName());
+          files, file);
+      long blockId = Block.filename2id(file.getName());
       ReplicaInfo newReplica = null;
       ReplicaInfo newReplica = null;
       if (isFinalized) {
       if (isFinalized) {
         newReplica = new FinalizedReplica(blockId, 
         newReplica = new FinalizedReplica(blockId, 
-            blockFile.length(), genStamp, volume, blockFile.getParentFile());
+            file.length(), genStamp, volume, file.getParentFile());
       } else {
       } else {
 
 
         boolean loadRwr = true;
         boolean loadRwr = true;
-        File restartMeta = new File(blockFile.getParent()  +
-            File.pathSeparator + "." + blockFile.getName() + ".restart");
+        File restartMeta = new File(file.getParent()  +
+            File.pathSeparator + "." + file.getName() + ".restart");
         Scanner sc = null;
         Scanner sc = null;
         try {
         try {
           sc = new Scanner(restartMeta);
           sc = new Scanner(restartMeta);
@@ -291,8 +335,8 @@ class BlockPoolSlice {
           if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
           if (sc.hasNextLong() && (sc.nextLong() > Time.now())) {
             // It didn't expire. Load the replica as a RBW.
             // It didn't expire. Load the replica as a RBW.
             newReplica = new ReplicaBeingWritten(blockId,
             newReplica = new ReplicaBeingWritten(blockId,
-                validateIntegrityAndSetLength(blockFile, genStamp), 
-                genStamp, volume, blockFile.getParentFile(), null);
+                validateIntegrityAndSetLength(file, genStamp),
+                genStamp, volume, file.getParentFile(), null);
             loadRwr = false;
             loadRwr = false;
           }
           }
           sc.close();
           sc.close();
@@ -301,7 +345,7 @@ class BlockPoolSlice {
               restartMeta.getPath());
               restartMeta.getPath());
           }
           }
         } catch (FileNotFoundException fnfe) {
         } catch (FileNotFoundException fnfe) {
-          // nothing to do here
+          // nothing to do here; the restart meta file simply does not exist
         } finally {
         } finally {
           if (sc != null) {
           if (sc != null) {
             sc.close();
             sc.close();
@@ -310,15 +354,15 @@ class BlockPoolSlice {
         // Restart meta doesn't exist or expired.
         // Restart meta doesn't exist or expired.
         if (loadRwr) {
         if (loadRwr) {
           newReplica = new ReplicaWaitingToBeRecovered(blockId,
           newReplica = new ReplicaWaitingToBeRecovered(blockId,
-              validateIntegrityAndSetLength(blockFile, genStamp), 
-              genStamp, volume, blockFile.getParentFile());
+              validateIntegrityAndSetLength(file, genStamp),
+              genStamp, volume, file.getParentFile());
         }
         }
       }
       }
 
 
       ReplicaInfo oldReplica = volumeMap.add(bpid, newReplica);
       ReplicaInfo oldReplica = volumeMap.add(bpid, newReplica);
       if (oldReplica != null) {
       if (oldReplica != null) {
         FsDatasetImpl.LOG.warn("Two block files with the same block id exist " +
         FsDatasetImpl.LOG.warn("Two block files with the same block id exist " +
-            "on disk: " + oldReplica.getBlockFile() + " and " + blockFile );
+            "on disk: " + oldReplica.getBlockFile() + " and " + file );
       }
       }
     }
     }
   }
   }
@@ -405,10 +449,6 @@ class BlockPoolSlice {
     }
     }
   }
   }
     
     
-  void clearPath(File f) {
-    finalizedDir.clearPath(f);
-  }
-    
   @Override
   @Override
   public String toString() {
   public String toString() {
     return currentDir.getAbsolutePath();
     return currentDir.getAbsolutePath();

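Because finalized replicas may now sit two subdirectory levels down, addToReplicasMap() above recurses into directories while scanning a volume. A stripped-down sketch of that traversal (names are made up; RBW, generation-stamp and unlink-tmp handling are omitted):

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class ReplicaScanSketch {
  /** Collect every finalized block file under dir, descending into subdirectories. */
  static void collectBlockFiles(File dir, List<File> out) {
    File[] files = dir.listFiles();
    if (files == null) {
      return;  // unreadable directory; the real code surfaces an IOException instead
    }
    for (File f : files) {
      if (f.isDirectory()) {
        collectBlockFiles(f, out);       // recurse, mirroring addToReplicasMap()
      } else if (f.getName().startsWith("blk_") && !f.getName().endsWith(".meta")) {
        out.add(f);                      // a block file; its ID is encoded in the name
      }
    }
  }

  public static void main(String[] args) {
    List<File> blocks = new ArrayList<File>();
    collectBlockFiles(new File("/data/current/finalized"), blocks);
    System.out.println(blocks.size() + " finalized block files found");
  }
}
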
+ 1 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -1151,7 +1151,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         return f;
         return f;
    
    
       // if file is not null, but doesn't exist - possibly disk failed
       // if file is not null, but doesn't exist - possibly disk failed
-      datanode.checkDiskError();
+      datanode.checkDiskErrorAsync();
     }
     }
     
     
     if (LOG.isDebugEnabled()) {
     if (LOG.isDebugEnabled()) {
@@ -1224,13 +1224,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
               +  ". Parent not found for file " + f);
               +  ". Parent not found for file " + f);
           continue;
           continue;
         }
         }
-        ReplicaState replicaState = info.getState();
-        if (replicaState == ReplicaState.FINALIZED || 
-            (replicaState == ReplicaState.RUR && 
-                ((ReplicaUnderRecovery)info).getOriginalReplica().getState() == 
-                  ReplicaState.FINALIZED)) {
-          v.clearPath(bpid, parent);
-        }
         volumeMap.remove(bpid, invalidBlks[i]);
         volumeMap.remove(bpid, invalidBlks[i]);
       }
       }
 
 

+ 5 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -235,10 +236,6 @@ class FsVolumeImpl implements FsVolumeSpi {
     // dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
     // dfsUsage.incDfsUsed(b.getNumBytes()+metaFile.length());
     bp.addToReplicasMap(volumeMap, dir, isFinalized);
     bp.addToReplicasMap(volumeMap, dir, isFinalized);
   }
   }
-  
-  void clearPath(String bpid, File f) throws IOException {
-    getBlockPoolSlice(bpid).clearPath(f);
-  }
 
 
   @Override
   @Override
   public String toString() {
   public String toString() {
@@ -274,7 +271,8 @@ class FsVolumeImpl implements FsVolumeSpi {
     File finalizedDir = new File(bpCurrentDir,
     File finalizedDir = new File(bpCurrentDir,
         DataStorage.STORAGE_DIR_FINALIZED);
         DataStorage.STORAGE_DIR_FINALIZED);
     File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
     File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
-    if (finalizedDir.exists() && FileUtil.list(finalizedDir).length != 0) {
+    if (finalizedDir.exists() && !DatanodeUtil.dirNoFilesRecursive(
+        finalizedDir)) {
       return false;
       return false;
     }
     }
     if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
     if (rbwDir.exists() && FileUtil.list(rbwDir).length != 0) {
@@ -301,7 +299,8 @@ class FsVolumeImpl implements FsVolumeSpi {
       if (!rbwDir.delete()) {
       if (!rbwDir.delete()) {
         throw new IOException("Failed to delete " + rbwDir);
         throw new IOException("Failed to delete " + rbwDir);
       }
       }
-      if (!finalizedDir.delete()) {
+      if (!DatanodeUtil.dirNoFilesRecursive(finalizedDir) ||
+          !FileUtil.fullyDelete(finalizedDir)) {
         throw new IOException("Failed to delete " + finalizedDir);
         throw new IOException("Failed to delete " + finalizedDir);
       }
       }
       FileUtil.fullyDelete(tmpDir);
       FileUtil.fullyDelete(tmpDir);

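The FsVolumeImpl hunks above switch from "finalized/ is empty" to "finalized/ contains no files at any depth", since the ID-based layout can leave empty subdir shells behind. A hedged restatement of that check, with the redundant isDirectory() test folded away, plus the way the hunk uses it:

import java.io.File;
import java.io.IOException;

public class EmptyFinalizedCheck {
  /** True if no regular file exists anywhere under dir; empty subdirectories are ignored. */
  static boolean dirNoFilesRecursive(File dir) throws IOException {
    File[] contents = dir.listFiles();
    if (contents == null) {
      throw new IOException("Cannot list contents of " + dir);
    }
    for (File f : contents) {
      if (!f.isDirectory() || !dirNoFilesRecursive(f)) {
        return false;  // a file, or a subtree containing one
      }
    }
    return true;
  }

  public static void main(String[] args) throws IOException {
    File finalizedDir = new File("/data/current/BP-1/current/finalized");  // placeholder path
    if (finalizedDir.exists() && !dirNoFilesRecursive(finalizedDir)) {
      System.out.println("block pool still has finalized replicas; not deleting");
    }
  }
}
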
+ 0 - 228
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LDir.java

@@ -1,228 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.datanode.DataStorage;
-import org.apache.hadoop.util.DiskChecker;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-
-/**
- * A node type that can be built into a tree reflecting the
- * hierarchy of replicas on the local disk.
- */
-class LDir {
-  final File dir;
-  final int maxBlocksPerDir;
-
-  private int numBlocks = 0;
-  private LDir[] children = null;
-  private int lastChildIdx = 0;
-
-  LDir(File dir, int maxBlocksPerDir) throws IOException {
-    this.dir = dir;
-    this.maxBlocksPerDir = maxBlocksPerDir;
-
-    if (!dir.exists()) {
-      if (!dir.mkdirs()) {
-        throw new IOException("Failed to mkdirs " + dir);
-      }
-    } else {
-      File[] files = FileUtil.listFiles(dir); 
-      List<LDir> dirList = new ArrayList<LDir>();
-      for (int idx = 0; idx < files.length; idx++) {
-        if (files[idx].isDirectory()) {
-          dirList.add(new LDir(files[idx], maxBlocksPerDir));
-        } else if (Block.isBlockFilename(files[idx])) {
-          numBlocks++;
-        }
-      }
-      if (dirList.size() > 0) {
-        children = dirList.toArray(new LDir[dirList.size()]);
-      }
-    }
-  }
-      
-  File addBlock(Block b, File src) throws IOException {
-    //First try without creating subdirectories
-    File file = addBlock(b, src, false, false);          
-    return (file != null) ? file : addBlock(b, src, true, true);
-  }
-
-  private File addBlock(Block b, File src, boolean createOk, boolean resetIdx
-      ) throws IOException {
-    if (numBlocks < maxBlocksPerDir) {
-      final File dest = FsDatasetImpl.moveBlockFiles(b, src, dir);
-      numBlocks += 1;
-      return dest;
-    }
-          
-    if (lastChildIdx < 0 && resetIdx) {
-      //reset so that all children will be checked
-      lastChildIdx = DFSUtil.getRandom().nextInt(children.length);              
-    }
-          
-    if (lastChildIdx >= 0 && children != null) {
-      //Check if any child-tree has room for a block.
-      for (int i=0; i < children.length; i++) {
-        int idx = (lastChildIdx + i)%children.length;
-        File file = children[idx].addBlock(b, src, false, resetIdx);
-        if (file != null) {
-          lastChildIdx = idx;
-          return file; 
-        }
-      }
-      lastChildIdx = -1;
-    }
-          
-    if (!createOk) {
-      return null;
-    }
-          
-    if (children == null || children.length == 0) {
-      children = new LDir[maxBlocksPerDir];
-      for (int idx = 0; idx < maxBlocksPerDir; idx++) {
-        final File sub = new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx);
-        children[idx] = new LDir(sub, maxBlocksPerDir);
-      }
-    }
-          
-    //now pick a child randomly for creating a new set of subdirs.
-    lastChildIdx = DFSUtil.getRandom().nextInt(children.length);
-    return children[ lastChildIdx ].addBlock(b, src, true, false); 
-  }
-
-  void getVolumeMap(String bpid, ReplicaMap volumeMap, FsVolumeImpl volume
-      ) throws IOException {
-    if (children != null) {
-      for (int i = 0; i < children.length; i++) {
-        children[i].getVolumeMap(bpid, volumeMap, volume);
-      }
-    }
-
-    recoverTempUnlinkedBlock();
-    volume.addToReplicasMap(bpid, volumeMap, dir, true);
-  }
-      
-  /**
-   * Recover unlinked tmp files on datanode restart. If the original block
-   * does not exist, then the tmp file is renamed to be the
-   * original file name; otherwise the tmp file is deleted.
-   */
-  private void recoverTempUnlinkedBlock() throws IOException {
-    File files[] = FileUtil.listFiles(dir);
-    for (File file : files) {
-      if (!FsDatasetUtil.isUnlinkTmpFile(file)) {
-        continue;
-      }
-      File blockFile = FsDatasetUtil.getOrigFile(file);
-      if (blockFile.exists()) {
-        // If the original block file still exists, then no recovery  is needed.
-        if (!file.delete()) {
-          throw new IOException("Unable to cleanup unlinked tmp file " + file);
-        }
-      } else {
-        if (!file.renameTo(blockFile)) {
-          throw new IOException("Unable to cleanup detached file " + file);
-        }
-      }
-    }
-  }
-  
-  /**
-   * check if a data diretory is healthy
-   * @throws DiskErrorException
-   */
-  void checkDirTree() throws DiskErrorException {
-    DiskChecker.checkDir(dir);
-          
-    if (children != null) {
-      for (int i = 0; i < children.length; i++) {
-        children[i].checkDirTree();
-      }
-    }
-  }
-      
-  void clearPath(File f) {
-    String root = dir.getAbsolutePath();
-    String dir = f.getAbsolutePath();
-    if (dir.startsWith(root)) {
-      String[] dirNames = dir.substring(root.length()).
-        split(File.separator + DataStorage.BLOCK_SUBDIR_PREFIX);
-      if (clearPath(f, dirNames, 1))
-        return;
-    }
-    clearPath(f, null, -1);
-  }
-      
-  /**
-   * dirNames is an array of string integers derived from
-   * usual directory structure data/subdirN/subdirXY/subdirM ...
-   * If dirName array is non-null, we only check the child at 
-   * the children[dirNames[idx]]. This avoids iterating over
-   * children in common case. If directory structure changes 
-   * in later versions, we need to revisit this.
-   */
-  private boolean clearPath(File f, String[] dirNames, int idx) {
-    if ((dirNames == null || idx == dirNames.length) &&
-        dir.compareTo(f) == 0) {
-      numBlocks--;
-      return true;
-    }
-        
-    if (dirNames != null) {
-      //guess the child index from the directory name
-      if (idx > (dirNames.length - 1) || children == null) {
-        return false;
-      }
-      int childIdx; 
-      try {
-        childIdx = Integer.parseInt(dirNames[idx]);
-      } catch (NumberFormatException ignored) {
-        // layout changed? we could print a warning.
-        return false;
-      }
-      return (childIdx >= 0 && childIdx < children.length) ?
-        children[childIdx].clearPath(f, dirNames, idx+1) : false;
-    }
-
-    //guesses failed. back to blind iteration.
-    if (children != null) {
-      for(int i=0; i < children.length; i++) {
-        if (children[i].clearPath(f, null, -1)){
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-
-  @Override
-  public String toString() {
-    return "FSDir{dir=" + dir + ", children="
-        + (children == null ? null : Arrays.asList(children)) + "}";
-  }
-}

+ 0 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -1103,9 +1103,6 @@ public class FSDirectory implements Closeable {
       count++;
       count++;
     }
     }
     
     
-    // update inodeMap
-    removeFromInodeMap(Arrays.asList(allSrcInodes));
-    
     trgInode.setModificationTime(timestamp, trgLatestSnapshot);
     trgInode.setModificationTime(timestamp, trgLatestSnapshot);
     trgParent.updateModificationTime(timestamp, trgLatestSnapshot);
     trgParent.updateModificationTime(timestamp, trgLatestSnapshot);
     // update quota on the parent directory ('count' files removed, 0 space)
     // update quota on the parent directory ('count' files removed, 0 space)

+ 29 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -4585,8 +4585,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           // Otherwise fsck will report these blocks as MISSING, especially if the
           // Otherwise fsck will report these blocks as MISSING, especially if the
           // blocksReceived from Datanodes take a long time to arrive.
           // blocksReceived from Datanodes take a long time to arrive.
           for (int i = 0; i < trimmedTargets.size(); i++) {
           for (int i = 0; i < trimmedTargets.size(); i++) {
-            trimmedTargets.get(i).addBlock(
-              trimmedStorages.get(i), storedBlock);
+            DatanodeStorageInfo storageInfo =
+                trimmedTargets.get(i).getStorageInfo(trimmedStorages.get(i));
+            if (storageInfo != null) {
+              storageInfo.addBlock(storedBlock);
+            }
           }
           }
         }
         }
 
 
@@ -6066,7 +6069,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
   }
 
 
   public void processIncrementalBlockReport(final DatanodeID nodeID,
   public void processIncrementalBlockReport(final DatanodeID nodeID,
-      final String poolId, final StorageReceivedDeletedBlocks srdb)
+      final StorageReceivedDeletedBlocks srdb)
       throws IOException {
       throws IOException {
     writeLock();
     writeLock();
     try {
     try {
@@ -8824,6 +8827,29 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
     }
   }
   }
 
 
+  void checkAccess(String src, FsAction mode) throws AccessControlException,
+      FileNotFoundException, UnresolvedLinkException, IOException {
+    checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
+    readLock();
+    try {
+      checkOperation(OperationCategory.READ);
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
+      if (dir.getINode(src) == null) {
+        throw new FileNotFoundException("Path not found");
+      }
+      if (isPermissionEnabled) {
+        FSPermissionChecker pc = getPermissionChecker();
+        checkPathAccess(pc, src, mode);
+      }
+    } catch (AccessControlException e) {
+      logAuditEvent(false, "checkAccess", src);
+      throw e;
+    } finally {
+      readUnlock();
+    }
+  }
+
   /**
   /**
    * Default AuditLogger implementation; used when no access logger is
    * Default AuditLogger implementation; used when no access logger is
    * defined in the config file. It can also be explicitly listed in the
    * defined in the config file. It can also be explicitly listed in the

+ 8 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HAServiceStatus;
 import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.HealthCheckFailedException;
@@ -1067,7 +1068,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
       // for the same node and storage, so the value returned by the last
       // for the same node and storage, so the value returned by the last
       // call of this loop is the final updated value for noStaleStorage.
       // call of this loop is the final updated value for noStaleStorage.
       //
       //
-      noStaleStorages = bm.processReport(nodeReg, r.getStorage(), poolId, blocks);
+      noStaleStorages = bm.processReport(nodeReg, r.getStorage(), blocks);
       metrics.incrStorageBlockReportOps();
       metrics.incrStorageBlockReportOps();
     }
     }
 
 
@@ -1103,7 +1104,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
           +" blocks.");
           +" blocks.");
     }
     }
     for(StorageReceivedDeletedBlocks r : receivedAndDeletedBlocks) {
     for(StorageReceivedDeletedBlocks r : receivedAndDeletedBlocks) {
-      namesystem.processIncrementalBlockReport(nodeReg, poolId, r);
+      namesystem.processIncrementalBlockReport(nodeReg, r);
     }
     }
   }
   }
 
 
@@ -1458,5 +1459,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public void removeXAttr(String src, XAttr xAttr) throws IOException {
   public void removeXAttr(String src, XAttr xAttr) throws IOException {
     namesystem.removeXAttr(src, xAttr);
     namesystem.removeXAttr(src, xAttr);
   }
   }
+
+  @Override
+  public void checkAccess(String path, FsAction mode) throws IOException {
+    namesystem.checkAccess(path, mode);
+  }
 }
 }
 
 

+ 17 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -57,6 +57,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -112,6 +113,7 @@ import org.apache.hadoop.hdfs.web.resources.XAttrEncodingParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
+import org.apache.hadoop.hdfs.web.resources.FsActionParam;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.Server;
@@ -755,10 +757,12 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
           final XAttrEncodingParam xattrEncoding,
           final XAttrEncodingParam xattrEncoding,
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
-          final ExcludeDatanodesParam excludeDatanodes          
+          final ExcludeDatanodesParam excludeDatanodes,
+      @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
+          final FsActionParam fsAction
       ) throws IOException, InterruptedException {
       ) throws IOException, InterruptedException {
     return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
     return get(ugi, delegation, username, doAsUser, ROOT, op, offset, length,
-        renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes);
+        renewer, bufferSize, xattrNames, xattrEncoding, excludeDatanodes, fsAction);
   }
   }
 
 
   /** Handle HTTP GET request. */
   /** Handle HTTP GET request. */
@@ -789,11 +793,13 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
       @QueryParam(XAttrEncodingParam.NAME) @DefaultValue(XAttrEncodingParam.DEFAULT) 
           final XAttrEncodingParam xattrEncoding,
           final XAttrEncodingParam xattrEncoding,
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
       @QueryParam(ExcludeDatanodesParam.NAME) @DefaultValue(ExcludeDatanodesParam.DEFAULT)
-          final ExcludeDatanodesParam excludeDatanodes
+          final ExcludeDatanodesParam excludeDatanodes,
+      @QueryParam(FsActionParam.NAME) @DefaultValue(FsActionParam.DEFAULT)
+          final FsActionParam fsAction
       ) throws IOException, InterruptedException {
       ) throws IOException, InterruptedException {
 
 
     init(ugi, delegation, username, doAsUser, path, op, offset, length,
     init(ugi, delegation, username, doAsUser, path, op, offset, length,
-        renewer, bufferSize, xattrEncoding, excludeDatanodes);
+        renewer, bufferSize, xattrEncoding, excludeDatanodes, fsAction);
 
 
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
     return ugi.doAs(new PrivilegedExceptionAction<Response>() {
       @Override
       @Override
@@ -801,7 +807,7 @@ public class NamenodeWebHdfsMethods {
         try {
         try {
           return get(ugi, delegation, username, doAsUser,
           return get(ugi, delegation, username, doAsUser,
               path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
               path.getAbsolutePath(), op, offset, length, renewer, bufferSize,
-              xattrNames, xattrEncoding, excludeDatanodes);
+              xattrNames, xattrEncoding, excludeDatanodes, fsAction);
         } finally {
         } finally {
           reset();
           reset();
         }
         }
@@ -822,7 +828,8 @@ public class NamenodeWebHdfsMethods {
       final BufferSizeParam bufferSize,
       final BufferSizeParam bufferSize,
       final List<XAttrNameParam> xattrNames,
       final List<XAttrNameParam> xattrNames,
       final XAttrEncodingParam xattrEncoding,
       final XAttrEncodingParam xattrEncoding,
-      final ExcludeDatanodesParam excludeDatanodes
+      final ExcludeDatanodesParam excludeDatanodes,
+      final FsActionParam fsAction
       ) throws IOException, URISyntaxException {
       ) throws IOException, URISyntaxException {
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final NameNode namenode = (NameNode)context.getAttribute("name.node");
     final NamenodeProtocols np = getRPCServer(namenode);
     final NamenodeProtocols np = getRPCServer(namenode);
@@ -919,6 +926,10 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(xAttrs);
       final String js = JsonUtil.toJsonString(xAttrs);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     }
+    case CHECKACCESS: {
+      np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue()));
+      return Response.ok().build();
+    }
     default:
     default:
       throw new UnsupportedOperationException(op + " is not supported");
       throw new UnsupportedOperationException(op + " is not supported");
     }
     }

+ 19 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java

@@ -17,10 +17,9 @@
  */
  */
 package org.apache.hadoop.hdfs.server.protocol;
 package org.apache.hadoop.hdfs.server.protocol;
 
 
-import java.util.Arrays;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.Block;
 
 
 /**
 /**
@@ -39,12 +38,15 @@ public class BlocksWithLocations {
     final Block block;
     final Block block;
     final String[] datanodeUuids;
     final String[] datanodeUuids;
     final String[] storageIDs;
     final String[] storageIDs;
+    final StorageType[] storageTypes;
     
     
     /** constructor */
     /** constructor */
-    public BlockWithLocations(Block block, String[] datanodeUuids, String[] storageIDs) {
+    public BlockWithLocations(Block block, String[] datanodeUuids,
+        String[] storageIDs, StorageType[] storageTypes) {
       this.block = block;
       this.block = block;
       this.datanodeUuids = datanodeUuids;
       this.datanodeUuids = datanodeUuids;
       this.storageIDs = storageIDs;
       this.storageIDs = storageIDs;
+      this.storageTypes = storageTypes;
     }
     }
     
     
     /** get the block */
     /** get the block */
@@ -61,7 +63,12 @@ public class BlocksWithLocations {
     public String[] getStorageIDs() {
     public String[] getStorageIDs() {
       return storageIDs;
       return storageIDs;
     }
     }
-    
+
+    /** @return the storage types */
+    public StorageType[] getStorageTypes() {
+      return storageTypes;
+    }
+
     @Override
     @Override
     public String toString() {
     public String toString() {
       final StringBuilder b = new StringBuilder();
       final StringBuilder b = new StringBuilder();
@@ -70,12 +77,18 @@ public class BlocksWithLocations {
         return b.append("[]").toString();
         return b.append("[]").toString();
       }
       }
       
       
-      b.append(storageIDs[0]).append('@').append(datanodeUuids[0]);
+      appendString(0, b.append("["));
       for(int i = 1; i < datanodeUuids.length; i++) {
       for(int i = 1; i < datanodeUuids.length; i++) {
-        b.append(", ").append(storageIDs[i]).append("@").append(datanodeUuids[i]);
+        appendString(i, b.append(","));
       }
       }
       return b.append("]").toString();
       return b.append("]").toString();
     }
     }
+    
+    private StringBuilder appendString(int i, StringBuilder b) {
+      return b.append("[").append(storageTypes[i]).append("]")
+              .append(storageIDs[i])
+              .append("@").append(datanodeUuids[i]);
+    }
   }
   }
 
 
   private final BlockWithLocations[] blocks;
   private final BlockWithLocations[] blocks;

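For orientation, a hypothetical snippet building a BlockWithLocations with the new storage-type array; the Block(id, length, genstamp) constructor and the StorageType values are assumed from surrounding HDFS code, and toString() now renders each replica as [TYPE]storageID@datanodeUuid:

import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;

public class BlockWithLocationsExample {
  public static void main(String[] args) {
    BlockWithLocations b = new BlockWithLocations(
        new Block(1073741825L, 1024L, 1001L),              // id, length, generation stamp
        new String[] { "dn-uuid-1", "dn-uuid-2" },
        new String[] { "storage-1", "storage-2" },
        new StorageType[] { StorageType.DISK, StorageType.SSD });
    // Prints the block followed by [[DISK]storage-1@dn-uuid-1,[SSD]storage-2@dn-uuid-2]
    System.out.println(b);
  }
}
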
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/XmlEditsVisitor.java

@@ -29,8 +29,8 @@ import org.xml.sax.ContentHandler;
 import org.xml.sax.SAXException;
 import org.xml.sax.SAXException;
 import org.xml.sax.helpers.AttributesImpl;
 import org.xml.sax.helpers.AttributesImpl;
 
 
-import com.sun.org.apache.xml.internal.serialize.OutputFormat;
-import com.sun.org.apache.xml.internal.serialize.XMLSerializer;
+import org.apache.xml.serialize.OutputFormat;
+import org.apache.xml.serialize.XMLSerializer;
 
 
 /**
 /**
  * An XmlEditsVisitor walks over an EditLog structure and writes out
  * An XmlEditsVisitor walks over an EditLog structure and writes out

+ 8 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java

@@ -37,7 +37,7 @@ import com.google.common.base.Preconditions;
 public class EnumCounters<E extends Enum<E>> {
 public class EnumCounters<E extends Enum<E>> {
   /** The class of the enum. */
   /** The class of the enum. */
   private final Class<E> enumClass;
   private final Class<E> enumClass;
-  /** The counter array, counters[i] corresponds to the enumConstants[i]. */
+  /** An array of longs corresponding to the enum type. */
   private final long[] counters;
   private final long[] counters;
 
 
   /**
   /**
@@ -75,6 +75,13 @@ public class EnumCounters<E extends Enum<E>> {
     }
     }
   }
   }
 
 
+  /** Reset all counters to zero. */
+  public final void reset() {
+    for(int i = 0; i < counters.length; i++) {
+      this.counters[i] = 0L;
+    }
+  }
+
   /** Add the given value to counter e. */
   /** Add the given value to counter e. */
   public final void add(final E e, final long value) {
   public final void add(final E e, final long value) {
     counters[e.ordinal()] += value;
     counters[e.ordinal()] += value;

+ 128 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java

@@ -0,0 +1,128 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import java.util.Arrays;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Similar to {@link EnumCounters} except that the value type is double.
+ *
+ * @param <E> the enum type
+ */
+public class EnumDoubles<E extends Enum<E>> {
+  /** The class of the enum. */
+  private final Class<E> enumClass;
+  /** An array of doubles corresponding to the enum type. */
+  private final double[] doubles;
+
+  /**
+   * Construct doubles for the given enum constants.
+   * @param enumClass the enum class.
+   */
+  public EnumDoubles(final Class<E> enumClass) {
+    final E[] enumConstants = enumClass.getEnumConstants();
+    Preconditions.checkNotNull(enumConstants);
+    this.enumClass = enumClass;
+    this.doubles = new double[enumConstants.length];
+  }
+  
+  /** @return the value corresponding to e. */
+  public final double get(final E e) {
+    return doubles[e.ordinal()];
+  }
+
+  /** Negate all values. */
+  public final void negation() {
+    for(int i = 0; i < doubles.length; i++) {
+      doubles[i] = -doubles[i];
+    }
+  }
+  
+  /** Set e to the given value. */
+  public final void set(final E e, final double value) {
+    doubles[e.ordinal()] = value;
+  }
+
+  /** Set the values of this object to that object. */
+  public final void set(final EnumDoubles<E> that) {
+    for(int i = 0; i < doubles.length; i++) {
+      this.doubles[i] = that.doubles[i];
+    }
+  }
+
+  /** Reset all values to zero. */
+  public final void reset() {
+    for(int i = 0; i < doubles.length; i++) {
+      this.doubles[i] = 0.0;
+    }
+  }
+
+  /** Add the given value to e. */
+  public final void add(final E e, final double value) {
+    doubles[e.ordinal()] += value;
+  }
+
+  /** Add the values of that object to this. */
+  public final void add(final EnumDoubles<E> that) {
+    for(int i = 0; i < doubles.length; i++) {
+      this.doubles[i] += that.doubles[i];
+    }
+  }
+
+  /** Subtract the given value from e. */
+  public final void subtract(final E e, final double value) {
+    doubles[e.ordinal()] -= value;
+  }
+
+  /** Subtract the values of this object from that object. */
+  public final void subtract(final EnumDoubles<E> that) {
+    for(int i = 0; i < doubles.length; i++) {
+      this.doubles[i] -= that.doubles[i];
+    }
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == this) {
+      return true;
+    } else if (obj == null || !(obj instanceof EnumDoubles)) {
+      return false;
+    }
+    final EnumDoubles<?> that = (EnumDoubles<?>)obj;
+    return this.enumClass == that.enumClass
+        && Arrays.equals(this.doubles, that.doubles);
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(doubles);
+  }
+
+  @Override
+  public String toString() {
+    final E[] enumConstants = enumClass.getEnumConstants();
+    final StringBuilder b = new StringBuilder();
+    for(int i = 0; i < doubles.length; i++) {
+      final String name = enumConstants[i].name();
+      b.append(name).append("=").append(doubles[i]).append(", ");
+    }
+    return b.substring(0, b.length() - 2);
+  }
+}

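EnumDoubles is entirely new in this patch, so a small usage example may help; the Ratio enum below is made up:

import org.apache.hadoop.hdfs.util.EnumDoubles;

public class EnumDoublesExample {
  // Hypothetical enum; any enum type works.
  enum Ratio { DISK_USED, CACHE_USED }

  public static void main(String[] args) {
    EnumDoubles<Ratio> ratios = new EnumDoubles<Ratio>(Ratio.class);
    ratios.set(Ratio.DISK_USED, 0.75);
    ratios.add(Ratio.DISK_USED, 0.05);   // now 0.8
    ratios.set(Ratio.CACHE_USED, 0.25);
    System.out.println(ratios);          // prints something like DISK_USED=0.8, CACHE_USED=0.25
    ratios.reset();                      // back to all zeros
  }
}
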
+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -1356,6 +1357,12 @@ public class WebHdfsFileSystem extends FileSystem
     }.run();
     }.run();
   }
   }
 
 
+  @Override
+  public void access(final Path path, final FsAction mode) throws IOException {
+    final HttpOpParam.Op op = GetOpParam.Op.CHECKACCESS;
+    new FsPathRunner(op, path, new FsActionParam(mode)).run();
+  }
+
   @Override
   @Override
   public ContentSummary getContentSummary(final Path p) throws IOException {
   public ContentSummary getContentSummary(final Path p) throws IOException {
     statistics.incrementReadOps(1);
     statistics.incrementReadOps(1);

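A hedged client-side view of the new access() call; the URI, port and path are placeholders, and the contract assumed here is the one the WebHDFS documentation below points at (FileSystem.access throws AccessControlException when the requested permissions are not granted):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class CheckAccessExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // A webhdfs:// URI goes through the WebHdfsFileSystem.access() override added above;
    // an hdfs:// URI would take the RPC path instead.
    FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:50070"), conf);
    Path p = new Path("/user/alice/data");
    try {
      fs.access(p, FsAction.READ_WRITE);   // no return value; throws when denied
      System.out.println("read/write allowed on " + p);
    } catch (AccessControlException ace) {
      System.out.println("access denied: " + ace.getMessage());
    }
  }
}
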
+ 58 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/FsActionParam.java

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.fs.permission.FsAction;
+
+import java.util.regex.Pattern;
+
+/** {@link FsAction} Parameter */
+public class FsActionParam extends StringParam {
+
+  /** Parameter name. */
+  public static final String NAME = "fsaction";
+
+  /** Default parameter value. */
+  public static final String DEFAULT = NULL;
+
+  private static final String FS_ACTION_PATTERN = "[rwx-]{3}";
+
+  private static final Domain DOMAIN = new Domain(NAME,
+      Pattern.compile(FS_ACTION_PATTERN));
+
+  /**
+   * Constructor.
+   * @param str a string representation of the parameter value.
+   */
+  public FsActionParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT)? null: str);
+  }
+
+  /**
+   * Constructor.
+   * @param value the parameter value.
+   */
+  public FsActionParam(final FsAction value) {
+    super(DOMAIN, value == null? null: value.SYMBOL);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}

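A quick, hypothetical look at how the parameter serializes (READ_EXECUTE's SYMBOL is "r-x", which satisfies the [rwx-]{3} domain above):

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.web.resources.FsActionParam;

public class FsActionParamExample {
  public static void main(String[] args) {
    FsActionParam p = new FsActionParam(FsAction.READ_EXECUTE);
    // Rendered into the query string as fsaction=r-x.
    System.out.println(p.getName() + "=" + p.getValue());
  }
}
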
+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java

@@ -39,7 +39,9 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     GETXATTRS(false, HttpURLConnection.HTTP_OK),
     GETXATTRS(false, HttpURLConnection.HTTP_OK),
     LISTXATTRS(false, HttpURLConnection.HTTP_OK),
     LISTXATTRS(false, HttpURLConnection.HTTP_OK),
 
 
-    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
+    NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED),
+
+    CHECKACCESS(false, HttpURLConnection.HTTP_OK);
 
 
     final boolean redirect;
     final boolean redirect;
     final int expectedHttpResponseCode;
     final int expectedHttpResponseCode;

+ 10 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -656,6 +656,14 @@ message DeleteSnapshotRequestProto {
 message DeleteSnapshotResponseProto { // void response
 message DeleteSnapshotResponseProto { // void response
 }
 }
 
 
+message CheckAccessRequestProto {
+  required string path = 1;
+  required AclEntryProto.FsActionProto mode = 2;
+}
+
+message CheckAccessResponseProto { // void response
+}
+
 service ClientNamenodeProtocol {
 service ClientNamenodeProtocol {
   rpc getBlockLocations(GetBlockLocationsRequestProto)
   rpc getBlockLocations(GetBlockLocationsRequestProto)
       returns(GetBlockLocationsResponseProto);
       returns(GetBlockLocationsResponseProto);
@@ -785,6 +793,8 @@ service ClientNamenodeProtocol {
       returns(ListXAttrsResponseProto);
       returns(ListXAttrsResponseProto);
   rpc removeXAttr(RemoveXAttrRequestProto)
   rpc removeXAttr(RemoveXAttrRequestProto)
       returns(RemoveXAttrResponseProto);
       returns(RemoveXAttrResponseProto);
+  rpc checkAccess(CheckAccessRequestProto)
+      returns(CheckAccessResponseProto);
   rpc createEncryptionZone(CreateEncryptionZoneRequestProto)
   rpc createEncryptionZone(CreateEncryptionZoneRequestProto)
       returns(CreateEncryptionZoneResponseProto);
       returns(CreateEncryptionZoneResponseProto);
   rpc listEncryptionZones(ListEncryptionZonesRequestProto)
   rpc listEncryptionZones(ListEncryptionZonesRequestProto)

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto

@@ -424,6 +424,7 @@ message BlockWithLocationsProto {
   required BlockProto block = 1;   // Block
   required BlockProto block = 1;   // Block
   repeated string datanodeUuids = 2; // Datanodes with replicas of the block
   repeated string datanodeUuids = 2; // Datanodes with replicas of the block
   repeated string storageUuids = 3;  // Storages with replicas of the block
   repeated string storageUuids = 3;  // Storages with replicas of the block
+  repeated StorageTypeProto storageTypes = 4;
 }
 }
 
 
 /**
 /**

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -2052,6 +2052,14 @@
   </description>
   </description>
 </property>
 </property>
 
 
+<property>
+  <name>dfs.datanode.block.id.layout.upgrade.threads</name>
+  <value>12</value>
+  <description>The number of threads to use when creating hard links from
+    current to previous blocks during upgrade of a DataNode to block ID-based
+    block layout (see HDFS-6482 for details on the layout).</description>
+</property>
+
 <property>
 <property>
   <name>dfs.namenode.list.encryption.zones.num.responses</name>
   <name>dfs.namenode.list.encryption.zones.num.responses</name>
   <value>100</value>
   <value>100</value>

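For completeness, a hedged sketch of reading the new key; the string literal is used here rather than assuming a particular DFSConfigKeys constant name:

import org.apache.hadoop.conf.Configuration;

public class LayoutUpgradeThreadsExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Falls back to the documented default of 12 hard-link worker threads.
    int threads = conf.getInt("dfs.datanode.block.id.layout.upgrade.threads", 12);
    System.out.println("layout upgrade hard-link threads: " + threads);
  }
}
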
+ 8 - 5
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm

@@ -47,18 +47,21 @@ HDFS NFS Gateway
    The NFS-gateway uses proxy user to proxy all the users accessing the NFS mounts. 
    In non-secure mode, the user running the gateway is the proxy user, while in secure mode the
    user in Kerberos keytab is the proxy user. Suppose the proxy user is 'nfsserver'
-   and users belonging to the groups 'nfs-users1'
-   and 'nfs-users2' use the NFS mounts, then in core-site.xml of the NameNode, the following
+   and users belonging to the groups 'users-group1'
+   and 'users-group2' use the NFS mounts, then in core-site.xml of the NameNode, the following
    two properities must be set and only NameNode needs restart after the configuration change
    (NOTE: replace the string 'nfsserver' with the proxy user name in your cluster):

 ----
 <property>
   <name>hadoop.proxyuser.nfsserver.groups</name>
-  <value>nfs-users1,nfs-users2</value>
+  <value>root,users-group1,users-group2</value>
   <description>
-         The 'nfsserver' user is allowed to proxy all members of the 'nfs-users1' and 
-         'nfs-users2' groups. Set this to '*' to allow nfsserver user to proxy any group.
+         The 'nfsserver' user is allowed to proxy all members of the 'users-group1' and 
+         'users-group2' groups. Note that in most cases you will need to include the
+         group "root" because the user "root" (which usually belongs to the "root" group) will
+         generally be the user that initially executes the mount on the NFS client system. 
+         Set this to '*' to allow nfsserver user to proxy any group.
   </description>
 </property>
 ----

+ 44 - 0
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/WebHDFS.apt.vm

@@ -82,6 +82,9 @@ WebHDFS REST API
     * {{{List all XAttrs}<<<LISTXATTRS>>>}}
         (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.listXAttrs)

+    * {{{Check access}<<<CHECKACCESS>>>}}
+        (see  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access)
+
   * HTTP PUT

     * {{{Create and Write to a File}<<<CREATE>>>}}
@@ -927,6 +930,28 @@ Transfer-Encoding: chunked
   {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.getAclStatus


+** {Check access}
+
+  * Submit an HTTP GET request.
+  
++---------------------------------
+curl -i -X GET "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CHECKACCESS
+                              &fsaction=<FSACTION>"
++---------------------------------
+
+  The client receives a response with zero content length:
+
++---------------------------------
+HTTP/1.1 200 OK
+Content-Length: 0
++---------------------------------
+
+  []
+
+  See also:
+  {{{../../api/org/apache/hadoop/fs/FileSystem.html}FileSystem}}.access
+    
+
 * {Extended Attributes(XAttrs) Operations}

 ** {Set XAttr}
@@ -2166,6 +2191,25 @@ var tokenProperties =
   {{Proxy Users}}


+** {Fs Action}
+
+*----------------+-------------------------------------------------------------------+
+|| Name          | <<<fsaction>>> |
+*----------------+-------------------------------------------------------------------+
+|| Description   | File system operation read/write/execute |
+*----------------+-------------------------------------------------------------------+
+|| Type          | String |
+*----------------+-------------------------------------------------------------------+
+|| Default Value | null (an invalid value) |
+*----------------+-------------------------------------------------------------------+
+|| Valid Values  | Strings matching regex pattern \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+|| Syntax        | \"[rwx-]\{3\}\" |
+*----------------+-------------------------------------------------------------------+
+
+  See also:
+  {{{Check access}<<<CHECKACCESS>>>}},
+
 ** {Group}

 *----------------+-------------------------------------------------------------------+

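A small sketch related to the fsaction parameter documented above (illustrative only; the host, port and path are made up, and FsAction.SYMBOL is assumed to hold the symbolic "[rwx-]{3}" form expected by CHECKACCESS):

import org.apache.hadoop.fs.permission.FsAction;

public class CheckAccessUrlExample {
  public static void main(String[] args) {
    FsAction action = FsAction.READ_EXECUTE;
    // FsAction.SYMBOL yields the three-character form, e.g. "r-x".
    String url = "http://namenode.example.com:50070/webhdfs/v1/tmp/data"
        + "?op=CHECKACCESS&fsaction=" + action.SYMBOL;
    System.out.println(url);  // ...?op=CHECKACCESS&fsaction=r-x
  }
}
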
+ 18 - 11
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java

@@ -47,7 +47,6 @@ import org.mockito.Mockito;
 public class TestGenericRefresh {
   private static MiniDFSCluster cluster;
   private static Configuration config;
-  private static final int NNPort = 54222;

   private static RefreshHandler firstHandler;
   private static RefreshHandler secondHandler;
@@ -57,8 +56,8 @@ public class TestGenericRefresh {
     config = new Configuration();
     config.set("hadoop.security.authorization", "true");

-    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
-    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
+    FileSystem.setDefaultUri(config, "hdfs://localhost:0");
+    cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
   }

@@ -103,7 +102,8 @@ public class TestGenericRefresh {
   @Test
   public void testInvalidIdentifier() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String [] args = new String[]{"-refresh", "localhost:" + NNPort, "unregisteredIdentity"};
+    String [] args = new String[]{"-refresh", "localhost:" + 
+        cluster.getNameNodePort(), "unregisteredIdentity"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
   }
@@ -111,7 +111,8 @@ public class TestGenericRefresh {
   @Test
   public void testValidIdentifier() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    String[] args = new String[]{"-refresh",
+        "localhost:" + cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should succeed", 0, exitCode);

@@ -124,11 +125,13 @@ public class TestGenericRefresh {
   @Test
   public void testVariableArgs() throws Exception {
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "secondHandler", "one"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should return 2", 2, exitCode);

-    exitCode = admin.run(new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one", "two"});
+    exitCode = admin.run(new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "secondHandler", "one", "two"});
     assertEquals("DFSAdmin should now return 3", 3, exitCode);

     Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
@@ -141,7 +144,8 @@ public class TestGenericRefresh {

     // And now this should fail
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "firstHandler"};
     int exitCode = admin.run(args);
     assertEquals("DFSAdmin should return -1", -1, exitCode);
   }
@@ -161,7 +165,8 @@ public class TestGenericRefresh {

     // this should trigger both
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "sharedId", "one"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "sharedId", "one"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // -1 because one of the responses is unregistered

@@ -189,7 +194,8 @@ public class TestGenericRefresh {

     // We refresh both
     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "shared"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "shared"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // We get -1 because of our logic for melding non-zero return codes

@@ -215,7 +221,8 @@ public class TestGenericRefresh {
     RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);

     DFSAdmin admin = new DFSAdmin(config);
-    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "exceptional"};
+    String[] args = new String[]{"-refresh", "localhost:" +
+        cluster.getNameNodePort(), "exceptional"};
     int exitCode = admin.run(args);
     assertEquals(-1, exitCode); // Exceptions result in a -1


+ 32 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestRefreshCallQueue.java

@@ -24,6 +24,8 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;

 import java.io.IOException;
+import java.net.BindException;
+import java.util.Random;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;

@@ -42,24 +44,42 @@ public class TestRefreshCallQueue {
   private FileSystem fs;
   static int mockQueueConstructions;
   static int mockQueuePuts;
-  private static final int NNPort = 54222;
-  private static String CALLQUEUE_CONFIG_KEY = "ipc." + NNPort + ".callqueue.impl";
+  private String callQueueConfigKey = "";
+  private final Random rand = new Random();

   @Before
   public void setUp() throws Exception {
     // We want to count additional events, so we reset here
     mockQueueConstructions = 0;
     mockQueuePuts = 0;
-
-    config = new Configuration();
-    config.setClass(CALLQUEUE_CONFIG_KEY,
-        MockCallQueue.class, BlockingQueue.class);
-    config.set("hadoop.security.authorization", "true");
-
-    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
-    fs = FileSystem.get(config);
-    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
-    cluster.waitActive();
+    int portRetries = 5;
+    int nnPort;
+
+    for (; portRetries > 0; --portRetries) {
+      // Pick a random port in the range [30000,60000).
+      nnPort = 30000 + rand.nextInt(30000);  
+      config = new Configuration();
+      callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
+      config.setClass(callQueueConfigKey,
+          MockCallQueue.class, BlockingQueue.class);
+      config.set("hadoop.security.authorization", "true");
+
+      FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
+      fs = FileSystem.get(config);
+      
+      try {
+        cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
+        cluster.waitActive();
+        break;
+      } catch (BindException be) {
+        // Retry with a different port number.
+      }
+    }
+    
+    if (portRetries == 0) {
+      // Bail if we get very unlucky with our choice of ports.
+      fail("Failed to pick an ephemeral port for the NameNode RPC server.");
+    }
   }

   @After

+ 28 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -2353,8 +2353,8 @@ public class MiniDFSCluster {
    * @return data file corresponding to the block
    */
   public static File getBlockFile(File storageDir, ExtendedBlock blk) {
-    return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), 
-        blk.getBlockName());
+    return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
+        blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName());
   }

   /**
@@ -2364,10 +2364,32 @@ public class MiniDFSCluster {
    * @return metadata file corresponding to the block
    */
   public static File getBlockMetadataFile(File storageDir, ExtendedBlock blk) {
-    return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()), 
-        blk.getBlockName() + "_" + blk.getGenerationStamp() +
-        Block.METADATA_EXTENSION);
-    
+    return new File(DatanodeUtil.idToBlockDir(getFinalizedDir(storageDir,
+        blk.getBlockPoolId()), blk.getBlockId()), blk.getBlockName() + "_" +
+        blk.getGenerationStamp() + Block.METADATA_EXTENSION);
+  }
+
+  /**
+   * Return all block metadata files in given directory (recursive search)
+   */
+  public static List<File> getAllBlockMetadataFiles(File storageDir) {
+    List<File> results = new ArrayList<File>();
+    File[] files = storageDir.listFiles();
+    if (files == null) {
+      return null;
+    }
+    for (File f : files) {
+      if (f.getName().startsWith("blk_") && f.getName().endsWith(
+          Block.METADATA_EXTENSION)) {
+        results.add(f);
+      } else if (f.isDirectory()) {
+        List<File> subdirResults = getAllBlockMetadataFiles(f);
+        if (subdirResults != null) {
+          results.addAll(subdirResults);
+        }
+      }
+    }
+    return results;
   }

   /**

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientFailover.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
 import org.apache.hadoop.io.retry.FailoverProxyProvider;
 import org.apache.hadoop.net.ConnectTimeoutException;
 import org.apache.hadoop.net.StandardSocketFactory;
+import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -89,6 +90,11 @@ public class TestDFSClientFailover {
     cluster.shutdown();
   }

+  @After
+  public void clearConfig() {
+    SecurityUtil.setTokenServiceUseIp(true);
+  }
+
   /**
    * Make sure that client failover works when an active NN dies and the standby
    * takes over.
@@ -323,6 +329,7 @@ public class TestDFSClientFailover {
   /**
    * Test to verify legacy proxy providers are correctly wrapped.
    */
+  @Test
   public void testWrappedFailoverProxyProvider() throws Exception {
     // setup the config with the dummy provider class
     Configuration config = new HdfsConfiguration(conf);
@@ -332,6 +339,9 @@ public class TestDFSClientFailover {
         DummyLegacyFailoverProxyProvider.class.getName());
     Path p = new Path("hdfs://" + logicalName + "/");

+    // not to use IP address for token service
+    SecurityUtil.setTokenServiceUseIp(false);
+
     // Logical URI should be used.
     assertTrue("Legacy proxy providers should use logical URI.",
         HAUtil.useLogicalUri(config, p.toUri()));
@@ -340,6 +350,7 @@ public class TestDFSClientFailover {
   /**
    * Test to verify IPFailoverProxyProvider is not requiring logical URI.
    */
+  @Test
   public void testIPFailoverProxyProviderLogicalUri() throws Exception {
     // setup the config with the IP failover proxy provider class
     Configuration config = new HdfsConfiguration(conf);

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSFinalize.java

@@ -79,8 +79,8 @@ public class TestDFSFinalize {
     File dnCurDirs[] = new File[dataNodeDirs.length];
     for (int i = 0; i < dataNodeDirs.length; i++) {
       dnCurDirs[i] = new File(dataNodeDirs[i],"current");
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i]),
-                   UpgradeUtilities.checksumMasterDataNodeContents());
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, dnCurDirs[i],
+              false), UpgradeUtilities.checksumMasterDataNodeContents());
     }
     for (int i = 0; i < nameNodeDirs.length; i++) {
       assertFalse(new File(nameNodeDirs[i],"previous").isDirectory());
@@ -96,8 +96,9 @@ public class TestDFSFinalize {
         assertFalse(new File(bpRoot,"previous").isDirectory());

         File bpCurFinalizeDir = new File(bpRoot,"current/"+DataStorage.STORAGE_DIR_FINALIZED);
-        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurFinalizeDir),
-                     UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
+        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+                bpCurFinalizeDir, true),
+                UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());
       }
     }
   }

+ 77 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSPermission.java

@@ -20,8 +20,11 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
@@ -36,6 +39,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Time;
@@ -421,6 +425,79 @@ public class TestDFSPermission {
     }
   }

+  @Test
+  public void testAccessOwner() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p1 = new Path("/p1");
+    rootFs.mkdirs(p1);
+    rootFs.setOwner(p1, USER1_NAME, GROUP1_NAME);
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.setPermission(p1, new FsPermission((short) 0444));
+    fs.access(p1, FsAction.READ);
+    try {
+      fs.access(p1, FsAction.WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+
+    Path badPath = new Path("/bad/bad");
+    try {
+      fs.access(badPath, FsAction.READ);
+      fail("The access call should have failed");
+    } catch (FileNotFoundException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testAccessGroupMember() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p2 = new Path("/p2");
+    rootFs.mkdirs(p2);
+    rootFs.setOwner(p2, UserGroupInformation.getCurrentUser().getShortUserName(), GROUP1_NAME);
+    rootFs.setPermission(p2, new FsPermission((short) 0740));
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.access(p2, FsAction.READ);
+    try {
+      fs.access(p2, FsAction.EXECUTE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
+  @Test
+  public void testAccessOthers() throws IOException, InterruptedException {
+    FileSystem rootFs = FileSystem.get(conf);
+    Path p3 = new Path("/p3");
+    rootFs.mkdirs(p3);
+    rootFs.setPermission(p3, new FsPermission((short) 0774));
+    fs = USER1.doAs(new PrivilegedExceptionAction<FileSystem>() {
+      @Override
+      public FileSystem run() throws Exception {
+        return FileSystem.get(conf);
+      }
+    });
+    fs.access(p3, FsAction.READ);
+    try {
+      fs.access(p3, FsAction.READ_WRITE);
+      fail("The access call should have failed.");
+    } catch (AccessControlException e) {
+      // expected
+    }
+  }
+
   /* Check if namenode performs permission checking correctly 
    * for the given user for operations mkdir, open, setReplication, 
    * getFileInfo, isDirectory, exists, getContentLength, list, rename,

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java

@@ -81,7 +81,7 @@ public class TestDFSRollback {
         break;
       case DATA_NODE:
         assertEquals(
-            UpgradeUtilities.checksumContents(nodeType, curDir),
+            UpgradeUtilities.checksumContents(nodeType, curDir, false),
             UpgradeUtilities.checksumMasterDataNodeContents());
         break;
       }

+ 9 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java

@@ -239,7 +239,7 @@ public class TestDFSStorageStateRecovery {
         assertTrue(new File(baseDirs[i],"previous").isDirectory());
         assertEquals(
                      UpgradeUtilities.checksumContents(
-                                                       NAME_NODE, new File(baseDirs[i],"previous")),
+                     NAME_NODE, new File(baseDirs[i],"previous"), false),
                      UpgradeUtilities.checksumMasterNameNodeContents());
       }
     }
@@ -259,7 +259,8 @@ public class TestDFSStorageStateRecovery {
     if (currentShouldExist) {
       for (int i = 0; i < baseDirs.length; i++) {
         assertEquals(
-                     UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"current")),
+                     UpgradeUtilities.checksumContents(DATA_NODE,
+                     new File(baseDirs[i],"current"), false),
                      UpgradeUtilities.checksumMasterDataNodeContents());
       }
     }
@@ -267,7 +268,8 @@ public class TestDFSStorageStateRecovery {
       for (int i = 0; i < baseDirs.length; i++) {
         assertTrue(new File(baseDirs[i],"previous").isDirectory());
         assertEquals(
-                     UpgradeUtilities.checksumContents(DATA_NODE, new File(baseDirs[i],"previous")),
+                     UpgradeUtilities.checksumContents(DATA_NODE,
+                     new File(baseDirs[i],"previous"), false),
                      UpgradeUtilities.checksumMasterDataNodeContents());
       }
     }
@@ -290,8 +292,8 @@ public class TestDFSStorageStateRecovery {
     if (currentShouldExist) {
       for (int i = 0; i < baseDirs.length; i++) {
         File bpCurDir = new File(baseDirs[i], Storage.STORAGE_DIR_CURRENT);
-        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir),
-                     UpgradeUtilities.checksumMasterBlockPoolContents());
+        assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, bpCurDir,
+                false), UpgradeUtilities.checksumMasterBlockPoolContents());
       }
     }
     if (previousShouldExist) {
@@ -299,8 +301,8 @@ public class TestDFSStorageStateRecovery {
         File bpPrevDir = new File(baseDirs[i], Storage.STORAGE_DIR_PREVIOUS);
         assertTrue(bpPrevDir.isDirectory());
         assertEquals(
-                     UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir),
-                     UpgradeUtilities.checksumMasterBlockPoolContents());
+                     UpgradeUtilities.checksumContents(DATA_NODE, bpPrevDir,
+                     false), UpgradeUtilities.checksumMasterBlockPoolContents());
       }
     }
   }

+ 7 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java

@@ -100,7 +100,7 @@ public class TestDFSUpgrade {

       File previous = new File(baseDir, "previous");
       assertExists(previous);
-      assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous),
+      assertEquals(UpgradeUtilities.checksumContents(NAME_NODE, previous, false),
           UpgradeUtilities.checksumMasterNameNodeContents());
     }
   }
@@ -114,23 +114,25 @@ public class TestDFSUpgrade {
   void checkDataNode(String[] baseDirs, String bpid) throws IOException {
     for (int i = 0; i < baseDirs.length; i++) {
       File current = new File(baseDirs[i], "current/" + bpid + "/current");
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, current, false),
         UpgradeUtilities.checksumMasterDataNodeContents());

       // block files are placed under <sd>/current/<bpid>/current/finalized
       File currentFinalized = 
         MiniDFSCluster.getFinalizedDir(new File(baseDirs[i]), bpid);
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, currentFinalized),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+          currentFinalized, true),
           UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());

       File previous = new File(baseDirs[i], "current/" + bpid + "/previous");
       assertTrue(previous.isDirectory());
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previous, false),
           UpgradeUtilities.checksumMasterDataNodeContents());

       File previousFinalized = 
         new File(baseDirs[i], "current/" + bpid + "/previous"+"/finalized");
-      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE, previousFinalized),
+      assertEquals(UpgradeUtilities.checksumContents(DATA_NODE,
+          previousFinalized, true),
           UpgradeUtilities.checksumMasterBlockPoolFinalizedContents());

     }

+ 11 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

@@ -24,6 +24,7 @@ import static org.junit.Assert.fail;

 import java.io.BufferedReader;
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.FileReader;
 import java.io.IOException;
@@ -80,7 +81,7 @@ public class TestDFSUpgradeFromImage {
     long checksum;
   }

-  private static final Configuration upgradeConf;
+  static final Configuration upgradeConf;

   static {
     upgradeConf = new HdfsConfiguration();
@@ -95,7 +96,7 @@ public class TestDFSUpgradeFromImage {

   boolean printChecksum = false;

-  private void unpackStorage(String tarFileName)
+  void unpackStorage(String tarFileName, String referenceName)
       throws IOException {
     String tarFile = System.getProperty("test.cache.data", "build/test/cache")
         + "/" + tarFileName;
@@ -110,7 +111,7 @@ public class TestDFSUpgradeFromImage {

     BufferedReader reader = new BufferedReader(new FileReader(
         System.getProperty("test.cache.data", "build/test/cache")
-            + "/" + HADOOP_DFS_DIR_TXT));
+            + "/" + referenceName));
     String line;
     while ( (line = reader.readLine()) != null ) {

@@ -285,7 +286,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel22Image() throws IOException {
-    unpackStorage(HADOOP22_IMAGE);
+    unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);
     upgradeAndVerify(new MiniDFSCluster.Builder(upgradeConf).
         numDataNodes(4));
   }
@@ -296,7 +297,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromCorruptRel22Image() throws IOException {
-    unpackStorage(HADOOP22_IMAGE);
+    unpackStorage(HADOOP22_IMAGE, HADOOP_DFS_DIR_TXT);

     // Overwrite the md5 stored in the VERSION files
     File baseDir = new File(MiniDFSCluster.getBaseDirectory());
@@ -333,7 +334,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel1ReservedImage() throws Exception {
-    unpackStorage(HADOOP1_RESERVED_IMAGE);
+    unpackStorage(HADOOP1_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
     MiniDFSCluster cluster = null;
     // Try it once without setting the upgrade flag to ensure it fails
     final Configuration conf = new Configuration();
@@ -403,7 +404,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel023ReservedImage() throws Exception {
-    unpackStorage(HADOOP023_RESERVED_IMAGE);
+    unpackStorage(HADOOP023_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
     MiniDFSCluster cluster = null;
     // Try it once without setting the upgrade flag to ensure it fails
     final Configuration conf = new Configuration();
@@ -468,7 +469,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel2ReservedImage() throws Exception {
-    unpackStorage(HADOOP2_RESERVED_IMAGE);
+    unpackStorage(HADOOP2_RESERVED_IMAGE, HADOOP_DFS_DIR_TXT);
     MiniDFSCluster cluster = null;
     // Try it once without setting the upgrade flag to ensure it fails
     final Configuration conf = new Configuration();
@@ -572,7 +573,7 @@ public class TestDFSUpgradeFromImage {
     } while (dirList.hasMore());
   }

-  private void upgradeAndVerify(MiniDFSCluster.Builder bld)
+  void upgradeAndVerify(MiniDFSCluster.Builder bld)
       throws IOException {
     MiniDFSCluster cluster = null;
     try {
@@ -601,7 +602,7 @@ public class TestDFSUpgradeFromImage {
    */
   @Test
   public void testUpgradeFromRel1BBWImage() throws IOException {
-    unpackStorage(HADOOP1_BBW_IMAGE);
+    unpackStorage(HADOOP1_BBW_IMAGE, HADOOP_DFS_DIR_TXT);
     Configuration conf = new Configuration(upgradeConf);
     conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, 
         System.getProperty("test.build.data") + File.separator + 

+ 5 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -445,19 +445,14 @@ public class TestDatanodeBlockScanner {

   @Test
   public void testReplicaInfoParsing() throws Exception {
-    testReplicaInfoParsingSingle(BASE_PATH, new int[0]);
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1", new int[]{1});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir43", new int[]{43});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3", new int[]{1, 2, 3});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir43", new int[]{1, 2, 43});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir23/subdir3", new int[]{1, 23, 3});
-    testReplicaInfoParsingSingle(BASE_PATH + "/subdir13/subdir2/subdir3", new int[]{13, 2, 3});
+    testReplicaInfoParsingSingle(BASE_PATH);
+    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1");
+    testReplicaInfoParsingSingle(BASE_PATH + "/subdir1/subdir2/subdir3");
   }

-  private static void testReplicaInfoParsingSingle(String subDirPath, int[] expectedSubDirs) {
+  private static void testReplicaInfoParsingSingle(String subDirPath) {
     File testFile = new File(subDirPath);
-    assertArrayEquals(expectedSubDirs, ReplicaInfo.parseSubDirs(testFile).subDirs);
-    assertEquals(BASE_PATH, ReplicaInfo.parseSubDirs(testFile).baseDirPath);
+    assertEquals(BASE_PATH, ReplicaInfo.parseBaseDir(testFile).baseDirPath);
   }

   @Test

+ 48 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeLayoutUpgrade.java

@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+
+public class TestDatanodeLayoutUpgrade {
+  private static final String HADOOP_DATANODE_DIR_TXT =
+      "hadoop-datanode-dir.txt";
+  private static final String HADOOP24_DATANODE = "hadoop-24-datanode-dir.tgz";
+
+  @Test
+  // Upgrade from LDir-based layout to block ID-based layout -- change described
+  // in HDFS-6482
+  public void testUpgradeToIdBasedLayout() throws IOException {
+    TestDFSUpgradeFromImage upgrade = new TestDFSUpgradeFromImage();
+    upgrade.unpackStorage(HADOOP24_DATANODE, HADOOP_DATANODE_DIR_TXT);
+    Configuration conf = new Configuration(TestDFSUpgradeFromImage.upgradeConf);
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+        System.getProperty("test.build.data") + File.separator +
+            "dfs" + File.separator + "data");
+    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+        System.getProperty("test.build.data") + File.separator +
+            "dfs" + File.separator + "name");
+    upgrade.upgradeAndVerify(new MiniDFSCluster.Builder(conf).numDataNodes(1)
+    .manageDataDfsDirs(false).manageNameDfsDirs(false));
+  }
+}

Some files were not shown because the number of changed files in this commit is too large.