
Merge r1609845 through r1614231 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-6584@1614234 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 10 years ago
parent
commit
10d6210322
100 changed files with 2125 additions and 425 deletions
  1. 47 21
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 24 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java
  3. 3 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
  4. 29 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
  5. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  6. 15 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
  7. 34 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
  8. 34 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java
  9. 80 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java
  10. 4 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java
  11. 4 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java
  12. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java
  13. 6 3
      hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm
  14. 20 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java
  15. 24 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java
  16. 8 4
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java
  17. 30 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
  18. 30 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
  19. 2 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java
  20. 6 1
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java
  21. 15 4
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
  22. 4 1
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java
  23. 61 19
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  24. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
  25. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
  26. 16 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  27. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  28. 11 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  29. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java
  30. 18 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  31. 16 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  32. 1 13
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
  33. 49 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  34. 9 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
  35. 6 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
  36. 42 12
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  37. 27 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
  38. 8 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
  39. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  40. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  41. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
  42. 11 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
  43. 80 18
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  44. 28 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
  45. 14 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  46. 15 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java
  47. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
  48. 42 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java
  49. 13 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java
  50. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java
  51. 9 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java
  52. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java
  53. 15 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto
  54. 0 24
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto
  55. 24 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
  56. 1 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
  57. 1 12
      hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html
  58. 0 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html
  59. 21 39
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
  60. 69 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
  61. 137 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
  62. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java
  63. 66 24
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
  64. 13 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
  65. 330 21
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
  66. 14 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
  67. 9 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
  68. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java
  69. 52 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
  70. 98 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
  71. 38 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
  72. 69 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java
  73. 0 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
  74. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml
  75. BIN
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz
  76. BIN
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz
  77. 3 0
      hadoop-mapreduce-project/CHANGES.txt
  78. 12 6
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
  79. 15 7
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
  80. 6 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
  81. 4 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java
  82. 4 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
  83. 37 36
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java
  84. 6 3
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java
  85. 5 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java
  86. 43 29
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java
  87. 7 1
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
  88. 5 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java
  89. 4 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ContainerSimulator.java
  90. 4 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java
  91. 4 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FifoSchedulerMetrics.java
  92. 4 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/NodeUpdateSchedulerEventWrapper.java
  93. 4 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
  94. 30 13
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java
  95. 16 10
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java
  96. 4 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java
  97. 4 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerWrapper.java
  98. 12 10
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java
  99. 4 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
  100. 4 0
      hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java

+ 47 - 21
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -189,6 +189,9 @@ Trunk (Unreleased)
     HADOOP-10720. KMS: Implement generateEncryptedKey and decryptEncryptedKey
     in the REST API. (asuresh via tucu)

+    HADOOP-10891. Add EncryptedKeyVersion factory method to
+    KeyProviderCryptoExtension. (wang)
+
   BUG FIXES

     HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -452,6 +455,11 @@ Release 2.6.0 - UNRELEASED

     HADOOP-10855. Allow Text to be read with a known Length. (todd)

+    HADOOP-10887. Add XAttrs to ViewFs and make XAttrs + ViewFileSystem
+    internal dir behavior consistent. (Stephen Chu via wang)
+
+    HADOOP-10882. Move DirectBufferPool into common util. (todd)
+
   OPTIMIZATIONS

   BUG FIXES
@@ -480,6 +488,12 @@ Release 2.6.0 - UNRELEASED
     command when the format of the stat command uses non-curly quotes (yzhang
     via cmccabe)

+    HADOOP-10830. Missing lock in JavaKeyStoreProvider.createCredentialEntry.
+    (Benoy Antony via umamahesh)
+
+    HADOOP-10876. The constructor of Path should not take an empty URL as a
+    parameter. (Zhihai Xu via wang)
+
 Release 2.5.0 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -492,6 +506,9 @@ Release 2.5.0 - UNRELEASED

     HADOOP-8943. Support multiple group mapping providers. (Kai Zheng via brandonli)

+    HADOOP-9361 Strictly define the expected behavior of filesystem APIs and
+    write tests to verify compliance (stevel)
+
   IMPROVEMENTS

     HADOOP-10451. Remove unused field and imports from SaslRpcServer.
@@ -586,9 +603,6 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10747. Support configurable retries on SASL connection failures in
     RPC client. (cnauroth)

-    HADOOP-10674. Improve PureJavaCrc32 performance and use java.util.zip.CRC32
-    for Java 7 and above. (szetszwo)
-
     HADOOP-10754. Reenable several HA ZooKeeper-related tests on Windows.
     (cnauroth)

@@ -600,9 +614,6 @@ Release 2.5.0 - UNRELEASED

     HADOOP-10767. Clean up unused code in Ls shell command. (cnauroth)

-    HADOOP-9361 Strictly define the expected behavior of filesystem APIs and
-    write tests to verify compliance (stevel)
-
     HADOOP-9651 Filesystems to throw FileAlreadyExistsException in
     createFile(path, overwrite=false) when the file exists (stevel)

@@ -613,8 +624,14 @@ Release 2.5.0 - UNRELEASED

     HADOOP-10782. Fix typo in DataChecksum class. (Jingguo Yao via suresh)

+    HADOOP-10896. Update compatibility doc to capture visibility of 
+    un-annotated classes/ methods. (kasha)
+
   OPTIMIZATIONS

+    HADOOP-10674. Improve PureJavaCrc32 performance and use java.util.zip.CRC32
+    for Java 7 and above. (szetszwo)
+
   BUG FIXES

     HADOOP-10378. Typo in help printed by hdfs dfs -help.
@@ -769,6 +786,30 @@ Release 2.5.0 - UNRELEASED

     HADOOP-10801 dead link in site.xml (Akira AJISAKA via stevel)

+    HADOOP-10590. ServiceAuthorizationManager is not threadsafe. (Benoy Antony via vinayakumarb)
+
+    HADOOP-10711. Cleanup some extra dependencies from hadoop-auth. (rkanter via tucu)
+
+    HADOOP-10479. Fix new findbugs warnings in hadoop-minikdc.
+    (Swarnim Kulkarni via wheat9)
+
+    HADOOP-10715. Remove public GraphiteSink#setWriter (Babak Behzad via raviprak)
+
+    HADOOP-10710. hadoop.auth cookie is not properly constructed according to 
+    RFC2109. (Juan Yu via tucu)
+
+    HADOOP-10864. Tool documentenation is broken. (Akira Ajisaka
+    via Arpit Agarwal)
+
+    HADOOP-10872. TestPathData fails intermittently with "Mkdirs failed
+    to create d1". (Yongjun Zhang via Arpit Agarwal)
+
+    HADOOP-10890. TestDFVariations.testMount fails intermittently. (Yongjun
+    Zhang via Arpit Agarwal)
+
+    HADOOP-10894. Fix dead link in ToolRunner documentation. (Akira Ajisaka
+    via Arpit Agarwal)
+
   BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS

     HADOOP-10520. Extended attributes definition and FileSystem APIs for
@@ -790,21 +831,6 @@ Release 2.5.0 - UNRELEASED
     HADOOP-10561. Copy command with preserve option should handle Xattrs.
     (Yi Liu via cnauroth)

-    HADOOP-10590. ServiceAuthorizationManager is not threadsafe. (Benoy Antony via vinayakumarb)
-
-    HADOOP-10711. Cleanup some extra dependencies from hadoop-auth. (rkanter via tucu)
-
-    HADOOP-10479. Fix new findbugs warnings in hadoop-minikdc.
-    (Swarnim Kulkarni via wheat9)
-
-    HADOOP-10715. Remove public GraphiteSink#setWriter (Babak Behzad via raviprak)
-
-    HADOOP-10710. hadoop.auth cookie is not properly constructed according to 
-    RFC2109. (Juan Yu via tucu)
-
-    HADOOP-10864. Tool documentenation is broken. (Akira Ajisaka
-    via Arpit Agarwal)
-
 Release 2.4.1 - 2014-06-23

   INCOMPATIBLE CHANGES

+ 24 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java

@@ -79,6 +79,30 @@ public class KeyProviderCryptoExtension extends
       this.encryptedKeyVersion = encryptedKeyVersion;
     }

+    /**
+     * Factory method to create a new EncryptedKeyVersion that can then be
+     * passed into {@link #decryptEncryptedKey}. Note that the fields of the
+     * returned EncryptedKeyVersion will only partially be populated; it is not
+     * necessarily suitable for operations besides decryption.
+     *
+     * @param encryptionKeyVersionName Version name of the encryption key used
+     *                                 to encrypt the encrypted key.
+     * @param encryptedKeyIv           Initialization vector of the encrypted
+     *                                 key. The IV of the encryption key used to
+     *                                 encrypt the encrypted key is derived from
+     *                                 this IV.
+     * @param encryptedKeyMaterial     Key material of the encrypted key.
+     * @return EncryptedKeyVersion suitable for decryption.
+     */
+    public static EncryptedKeyVersion createForDecryption(String
+        encryptionKeyVersionName, byte[] encryptedKeyIv,
+        byte[] encryptedKeyMaterial) {
+      KeyVersion encryptedKeyVersion = new KeyVersion(null, null,
+          encryptedKeyMaterial);
+      return new EncryptedKeyVersion(null, encryptionKeyVersionName,
+          encryptedKeyIv, encryptedKeyVersion);
+    }
+
     /**
      * @return Name of the encryption key used to encrypt the encrypted key.
      */

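To illustrate the factory method added above, here is a minimal usage sketch on the decryption side; the provider handle and the three stored fields (version name, IV, key material) are hypothetical inputs, not part of this patch:

    // Rebuild an EncryptedKeyVersion from persisted parts, then decrypt it.
    // 'provider' is a KeyProvider obtained elsewhere (hypothetical).
    KeyProviderCryptoExtension kp =
        KeyProviderCryptoExtension.createKeyProviderCryptoExtension(provider);
    EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
        encryptionKeyVersionName, encryptedKeyIv, encryptedKeyMaterial);
    KeyVersion decrypted = kp.decryptEncryptedKey(ekv);
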
+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -437,7 +437,9 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
         throw new FileNotFoundException("Parent directory doesn't exist: "
             + parent);
       } else if (!mkdirs(parent)) {
-        throw new IOException("Mkdirs failed to create " + parent);
+        throw new IOException("Mkdirs failed to create " + parent
+            + " (exists=" + exists(parent) + ", cwd=" + getWorkingDirectory()
+            + ")");
       }
     }
     final FSDataOutputStream out;

+ 29 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -2484,4 +2484,33 @@ public final class FileContext {
       }
     }.resolve(this, absF);
   }
+
+  /**
+   * Get all of the xattr names for a file or directory.
+   * Only those xattr names which the logged-in user has permissions to view
+   * are returned.
+   * <p/>
+   * A regular user can only get xattr names for the "user" namespace.
+   * The super user can only get xattr names for "user" and "trusted"
+   * namespaces.
+   * The xattrs of the "security" and "system" namespaces are only
+   * used/exposed internally by/to the FS impl.
+   * <p/>
+   * @see <a href="http://en.wikipedia.org/wiki/Extended_file_attributes">
+   * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
+   *
+   * @param path Path to get extended attributes
+   * @return List<String> of the XAttr names of the file or directory
+   * @throws IOException
+   */
+  public List<String> listXAttrs(Path path) throws IOException {
+    final Path absF = fixRelativePart(path);
+    return new FSLinkResolver<List<String>>() {
+      @Override
+      public List<String> next(final AbstractFileSystem fs, final Path p)
+          throws IOException {
+        return fs.listXAttrs(p);
+      }
+    }.resolve(this, absF);
+  }
 }

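A minimal sketch of how the new FileContext#listXAttrs composes with the existing getXAttr; the path is hypothetical:

    // List the xattr names the caller is permitted to see, then fetch each value.
    FileContext fc = FileContext.getFileContext();
    Path file = new Path("/user/alice/data.txt");   // hypothetical path
    for (String name : fc.listXAttrs(file)) {
      byte[] value = fc.getXAttr(file, name);
      System.out.println(name + " = " + value.length + " bytes");
    }
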
+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -2509,7 +2509,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * http://en.wikipedia.org/wiki/Extended_file_attributes</a>
    *
    * @param path Path to get extended attributes
-   * @return Map<String, byte[]> describing the XAttrs of the file or directory
+   * @return List<String> of the XAttr names of the file or directory
    * @throws IOException
    */
   public List<String> listXAttrs(Path path) throws IOException {

+ 15 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java

@@ -128,7 +128,20 @@ public class Path implements Comparable {
            "Can not create a Path from an empty string");
     }
   }
-  
+
+  /** check URI parameter of Path constructor. */
+  private void checkPathArg(URI aUri) throws IllegalArgumentException {
+    // disallow construction of a Path from an empty URI
+    if (aUri == null) {
+      throw new IllegalArgumentException(
+          "Can not create a Path from a null URI");
+    }
+    if (aUri.toString().isEmpty()) {
+      throw new IllegalArgumentException(
+          "Can not create a Path from an empty URI");
+    }
+  }
+
   /** Construct a path from a String.  Path strings are URIs, but with
    * unescaped elements and some additional normalization. */
   public Path(String pathString) throws IllegalArgumentException {
@@ -176,6 +189,7 @@ public class Path implements Comparable {
    * Construct a path from a URI
    */
   public Path(URI aUri) {
+    checkPathArg(aUri);
     uri = aUri.normalize();
   }


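The effect of the new check, in a short sketch that mirrors the TestPath cases added later in this change:

    // Both constructions now fail fast instead of yielding a malformed Path.
    try {
      new Path(URI.create(""));      // "Can not create a Path from an empty URI"
    } catch (IllegalArgumentException expected) { }
    try {
      new Path((URI) null);          // "Can not create a Path from a null URI"
    } catch (IllegalArgumentException expected) { }
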
+ 34 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java

@@ -22,6 +22,7 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -37,6 +38,7 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -313,6 +315,38 @@ class ChRootedFs extends AbstractFileSystem {
     return myFs.getAclStatus(fullPath(path));
   }

+  @Override
+  public void setXAttr(Path path, String name, byte[] value,
+                       EnumSet<XAttrSetFlag> flag) throws IOException {
+    myFs.setXAttr(fullPath(path), name, value, flag);
+  }
+
+  @Override
+  public byte[] getXAttr(Path path, String name) throws IOException {
+    return myFs.getXAttr(fullPath(path), name);
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+    return myFs.getXAttrs(fullPath(path));
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+      throws IOException {
+    return myFs.getXAttrs(fullPath(path), names);
+  }
+
+  @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    return myFs.listXAttrs(fullPath(path));
+  }
+
+  @Override
+  public void removeXAttr(Path path, String name) throws IOException {
+    myFs.removeXAttr(fullPath(path), name);
+  }
+
   @Override
   public void setVerifyChecksum(final boolean verifyChecksum)
       throws IOException, UnresolvedLinkException {

+ 34 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -913,5 +913,39 @@ public class ViewFileSystem extends FileSystem {
           .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
           .stickyBit(false).build();
     }
+
+    @Override
+    public void setXAttr(Path path, String name, byte[] value,
+                         EnumSet<XAttrSetFlag> flag) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("setXAttr", path);
+    }
+
+    @Override
+    public byte[] getXAttr(Path path, String name) throws IOException {
+      throw new NotInMountpointException(path, "getXAttr");
+    }
+
+    @Override
+    public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+      throw new NotInMountpointException(path, "getXAttrs");
+    }
+
+    @Override
+    public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+        throws IOException {
+      throw new NotInMountpointException(path, "getXAttrs");
+    }
+
+    @Override
+    public List<String> listXAttrs(Path path) throws IOException {
+      throw new NotInMountpointException(path, "listXAttrs");
+    }
+
+    @Override
+    public void removeXAttr(Path path, String name) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("removeXAttr", path);
+    }
   }
 }

+ 80 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java

@@ -26,6 +26,7 @@ import java.net.URISyntaxException;
 import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Map.Entry;

 import org.apache.hadoop.classification.InterfaceAudience;
@@ -48,6 +49,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.local.LocalConfigKeys;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclUtil;
@@ -651,6 +653,50 @@ public class ViewFs extends AbstractFileSystem {
         fsState.resolve(getUriPath(path), true);
     return res.targetFileSystem.getAclStatus(res.remainingPath);
   }
+
+  @Override
+  public void setXAttr(Path path, String name, byte[] value,
+                       EnumSet<XAttrSetFlag> flag) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.setXAttr(res.remainingPath, name, value, flag);
+  }
+
+  @Override
+  public byte[] getXAttr(Path path, String name) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    return res.targetFileSystem.getXAttr(res.remainingPath, name);
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    return res.targetFileSystem.getXAttrs(res.remainingPath);
+  }
+
+  @Override
+  public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+      throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    return res.targetFileSystem.getXAttrs(res.remainingPath, names);
+  }
+
+  @Override
+  public List<String> listXAttrs(Path path) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    return res.targetFileSystem.listXAttrs(res.remainingPath);
+  }
+
+  @Override
+  public void removeXAttr(Path path, String name) throws IOException {
+    InodeTree.ResolveResult<AbstractFileSystem> res =
+        fsState.resolve(getUriPath(path), true);
+    res.targetFileSystem.removeXAttr(res.remainingPath, name);
+  }


   /*
@@ -921,5 +967,39 @@ public class ViewFs extends AbstractFileSystem {
           .addEntries(AclUtil.getMinimalAcl(PERMISSION_555))
           .stickyBit(false).build();
     }
+
+    @Override
+    public void setXAttr(Path path, String name, byte[] value,
+                         EnumSet<XAttrSetFlag> flag) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("setXAttr", path);
+    }
+
+    @Override
+    public byte[] getXAttr(Path path, String name) throws IOException {
+      throw new NotInMountpointException(path, "getXAttr");
+    }
+
+    @Override
+    public Map<String, byte[]> getXAttrs(Path path) throws IOException {
+      throw new NotInMountpointException(path, "getXAttrs");
+    }
+
+    @Override
+    public Map<String, byte[]> getXAttrs(Path path, List<String> names)
+        throws IOException {
+      throw new NotInMountpointException(path, "getXAttrs");
+    }
+
+    @Override
+    public List<String> listXAttrs(Path path) throws IOException {
+      throw new NotInMountpointException(path, "listXAttrs");
+    }
+
+    @Override
+    public void removeXAttr(Path path, String name) throws IOException {
+      checkPathIsSlash(path);
+      throw readOnlyMountTable("removeXAttr", path);
+    }
   }
 }

+ 4 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/JavaKeyStoreProvider.java

@@ -194,15 +194,18 @@ public class JavaKeyStoreProvider extends CredentialProvider {
   @Override
   public CredentialEntry createCredentialEntry(String alias, char[] credential)
       throws IOException {
+    writeLock.lock();
     try {
       if (keyStore.containsAlias(alias) || cache.containsKey(alias)) {
         throw new IOException("Credential " + alias + " already exists in " + this);
       }
+      return innerSetCredential(alias, credential);
     } catch (KeyStoreException e) {
       throw new IOException("Problem looking up credential " + alias + " in " + this,
           e);
+    } finally {
+      writeLock.unlock();
     }
     }
   }
   }

   @Override
+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/DirectBufferPool.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/DirectBufferPool.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.hadoop.util;
 

 import java.lang.ref.WeakReference;
 import java.nio.ByteBuffer;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;

 import com.google.common.annotations.VisibleForTesting;
 

 /**
  * A simple class for pooling direct ByteBuffers. This is necessary
  * allocated at the same size. There is no attempt to reuse larger
  * allocated at the same size. There is no attempt to reuse larger
  * buffers to satisfy smaller allocations.
  */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
 public class DirectBufferPool {
 public class DirectBufferPool {

   // Essentially implement a multimap with weak values.
+ 1 - 1
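For reference, a typical borrow/return cycle against the pool; the move only changes the package (now org.apache.hadoop.util), not the existing getBuffer/returnBuffer usage pattern:

    // Borrow a pooled direct buffer, use it, and return it for reuse.
    DirectBufferPool pool = new DirectBufferPool();
    ByteBuffer buf = pool.getBuffer(64 * 1024);   // direct buffer of the requested size
    try {
      // ... fill and drain the buffer ...
    } finally {
      pool.returnBuffer(buf);                     // make it available to later callers
    }
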
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java

@@ -30,7 +30,7 @@ import org.apache.hadoop.conf.Configuration;
  * <p><code>ToolRunner</code> can be used to run classes implementing
  * <code>Tool</code> interface. It works in conjunction with
  * {@link GenericOptionsParser} to parse the
- * <a href="{@docRoot}/org/apache/hadoop/util/GenericOptionsParser.html#GenericOptions">
+ * <a href="{@docRoot}/../hadoop-project-dist/hadoop-common/CommandsManual.html#Generic_Options">
  * generic hadoop command line arguments</a> and modifies the
  * <code>Configuration</code> of the <code>Tool</code>. The
  * application-specific options are passed along without being modified.

+ 6 - 3
hadoop-common-project/hadoop-common/src/site/apt/Compatibility.apt.vm

@@ -72,10 +72,13 @@ Apache Hadoop Compatibility
     * Private-Stable APIs can change across major releases,
     but not within a major release.

+    * Classes not annotated are implicitly "Private". Class members not
+    annotated inherit the annotations of the enclosing class.
+
     * Note: APIs generated from the proto files need to be compatible for
-rolling-upgrades. See the section on wire-compatibility for more details. The
-compatibility policies for APIs and wire-communication need to go
-hand-in-hand to address this.
+    rolling-upgrades. See the section on wire-compatibility for more details.
+    The compatibility policies for APIs and wire-communication need to go
+    hand-in-hand to address this.

 ** Semantic compatibility


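A small sketch of what explicit annotation looks like, for contrast with the new rule that an un-annotated class is treated as Private (the class name is hypothetical):

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;

    @InterfaceAudience.Public
    @InterfaceStability.Stable
    public class ExampleClientApi {   // hypothetical; without annotations this would default to Private
    }
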
+ 20 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDFVariations.java

@@ -29,14 +29,33 @@ import java.util.Random;

 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;
+import org.junit.After;
+import org.junit.Before;
 import org.junit.Test;
+
 import static org.junit.Assert.*;

 public class TestDFVariations {
+  private static final String TEST_ROOT_DIR =
+      System.getProperty("test.build.data","build/test/data") + "/TestDFVariations";
+  private static File test_root = null;
 
+  @Before
+  public void setup() throws IOException {
+    test_root = new File(TEST_ROOT_DIR);
+    test_root.mkdirs();
+  }
+  
+  @After
+  public void after() throws IOException {
+    FileUtil.setWritable(test_root, true);
+    FileUtil.fullyDelete(test_root);
+    assertTrue(!test_root.exists());
+  }
+  
   public static class XXDF extends DF {
     public XXDF() throws IOException {
-      super(new File(System.getProperty("test.build.data","/tmp")), 0L);
+      super(test_root, 0L);
     }

     @Override

+ 24 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java

@@ -26,11 +26,13 @@ import java.util.Arrays;

 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.AvroTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Shell;

 import com.google.common.base.Joiner;

 import junit.framework.TestCase;
+import static org.junit.Assert.fail;

 public class TestPath extends TestCase {
   /**
@@ -305,6 +307,28 @@ public class TestPath extends TestCase {
     // if the child uri is absolute path
     assertEquals("foo://bar/fud#boo", new Path(new Path(new URI(
         "foo://bar/baz#bud")), new Path(new URI("/fud#boo"))).toString());
+
+    // empty URI
+    URI uri3 = new URI("");
+    assertEquals("", uri3.toString());
+    try {
+      path = new Path(uri3);
+      fail("Expected exception for empty URI");
+    } catch (IllegalArgumentException e) {
+      // expect to receive an IllegalArgumentException
+      GenericTestUtils.assertExceptionContains("Can not create a Path"
+          + " from an empty URI", e);
+    }
+    // null URI
+    uri3 = null;
+    try {
+      path = new Path(uri3);
+      fail("Expected exception for null URI");
+    } catch (IllegalArgumentException e) {
+      // expect to receive an IllegalArgumentException
+      GenericTestUtils.assertExceptionContains("Can not create a Path"
+          + " from a null URI", e);
+    }
   }

   /** Test URIs created from Path objects */

+ 8 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/shell/TestPathData.java

@@ -35,19 +35,22 @@ import org.junit.Before;
 import org.junit.Test;

 public class TestPathData {
+  private static final String TEST_ROOT_DIR = 
+      System.getProperty("test.build.data","build/test/data") + "/testPD";
   protected Configuration conf;
   protected FileSystem fs;
   protected Path testDir;
-
+  
   @Before
   public void initialize() throws Exception {
     conf = new Configuration();
     fs = FileSystem.getLocal(conf);
-    testDir = new Path(
-        System.getProperty("test.build.data", "build/test/data") + "/testPD"
-    );
+    testDir = new Path(TEST_ROOT_DIR);
+    
     // don't want scheme on the path, just an absolute path
     testDir = new Path(fs.makeQualified(testDir).toUri().getPath());
+    fs.mkdirs(testDir);
+
     FileSystem.setDefaultUri(conf, fs.getUri());
     fs.setWorkingDirectory(testDir);
     fs.mkdirs(new Path("d1"));
@@ -60,6 +63,7 @@ public class TestPathData {

   @After
   public void cleanup() throws Exception {
+    fs.delete(testDir, true);
     fs.close();
   }


+ 30 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java

@@ -773,4 +773,34 @@ public class ViewFileSystemBaseTest {
     assertFalse(aclStatus.isStickyBit());
   }

+  @Test(expected=AccessControlException.class)
+  public void testInternalSetXAttr() throws IOException {
+    fsView.setXAttr(new Path("/internalDir"), "xattrName", null);
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttr() throws IOException {
+    fsView.getXAttr(new Path("/internalDir"), "xattrName");
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttrs() throws IOException {
+    fsView.getXAttrs(new Path("/internalDir"));
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttrsWithNames() throws IOException {
+    fsView.getXAttrs(new Path("/internalDir"), new ArrayList<String>());
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalListXAttr() throws IOException {
+    fsView.listXAttrs(new Path("/internalDir"));
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalRemoveXAttr() throws IOException {
+    fsView.removeXAttr(new Path("/internalDir"), "xattrName");
+  }
+
 }

+ 30 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java

@@ -747,4 +747,34 @@ public class ViewFsBaseTest {
         AclUtil.getMinimalAcl(PERMISSION_555));
     assertFalse(aclStatus.isStickyBit());
   }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalSetXAttr() throws IOException {
+    fcView.setXAttr(new Path("/internalDir"), "xattrName", null);
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttr() throws IOException {
+    fcView.getXAttr(new Path("/internalDir"), "xattrName");
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttrs() throws IOException {
+    fcView.getXAttrs(new Path("/internalDir"));
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalGetXAttrsWithNames() throws IOException {
+    fcView.getXAttrs(new Path("/internalDir"), new ArrayList<String>());
+  }
+
+  @Test(expected=NotInMountpointException.class)
+  public void testInternalListXAttr() throws IOException {
+    fcView.listXAttrs(new Path("/internalDir"));
+  }
+
+  @Test(expected=AccessControlException.class)
+  public void testInternalRemoveXAttr() throws IOException {
+    fcView.removeXAttr(new Path("/internalDir"), "xattrName");
+  }
 }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDirectBufferPool.java → hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestDirectBufferPool.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.util;
+package org.apache.hadoop.util;

 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotSame;
@@ -29,7 +29,7 @@ import org.junit.Test;
 import com.google.common.collect.Lists;

 public class TestDirectBufferPool {
-  final DirectBufferPool pool = new DirectBufferPool();
+  final org.apache.hadoop.util.DirectBufferPool pool = new org.apache.hadoop.util.DirectBufferPool();

   @Test
   public void testBasics() {

+ 6 - 1
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java

@@ -53,7 +53,12 @@ public class NfsExports {
       long expirationPeriodNano = conf.getLong(
           Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
           Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
-      exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
+      try {
+        exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
+      } catch (IllegalArgumentException e) {
+        LOG.error("Invalid NFS Exports provided: ", e);
+        return exports;
+      }
     }
     return exports;
   }

+ 15 - 4
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java

@@ -104,6 +104,10 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {

   @Override
   public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) {
+    if (hostsMatcher == null) {
+      return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
+          null);
+    }
     AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client);
     if (accessPrivilege == AccessPrivilege.NONE) {
       return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid,
@@ -208,16 +212,23 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     } else if (mntproc == MNTPROC.UMNTALL) {
       umntall(out, xid, client);
     } else if (mntproc == MNTPROC.EXPORT) {
-      // Currently only support one NFS export 
+      // Currently only support one NFS export
       List<NfsExports> hostsMatchers = new ArrayList<NfsExports>();
-      hostsMatchers.add(hostsMatcher);
-      out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      if (hostsMatcher != null) {
+        hostsMatchers.add(hostsMatcher);
+        out = MountResponse.writeExportList(out, xid, exports, hostsMatchers);
+      } else {
+        // This means there are no valid exports provided.
+        RpcAcceptedReply.getInstance(xid,
+          RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
+          out);
+      }
     } else {
       // Invalid procedure
       RpcAcceptedReply.getInstance(xid,
           RpcAcceptedReply.AcceptState.PROC_UNAVAIL, new VerifierNone()).write(
           out);
-    }  
+    }
     ChannelBuffer buf = ChannelBuffers.wrappedBuffer(out.asReadOnlyWrap().buffer());
     RpcResponse rsp = new RpcResponse(buf, info.remoteAddress());
     RpcUtil.sendRpcResponse(ctx, rsp);

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java

@@ -2123,8 +2123,11 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
     if (!doPortMonitoring(remoteAddress)) {
       return false;
     }
-    
+
     // Check export table
+    if (exports == null) {
+        return false;
+    }
     InetAddress client = ((InetSocketAddress) remoteAddress).getAddress();
     AccessPrivilege access = exports.getAccessPrivilege(client);
     if (access == AccessPrivilege.NONE) {

+ 61 - 19
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -272,6 +272,9 @@ Trunk (Unreleased)
     HDFS-5794. Fix the inconsistency of layout version number of
     ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)

+    HDFS-6657. Remove link to 'Legacy UI' in trunk's Namenode UI.
+    (Vinayakumar B via wheat 9)
+
 Release 2.6.0 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -334,6 +337,15 @@ Release 2.6.0 - UNRELEASED
     HDFS-6701. Make seed optional in NetworkTopology#sortByDistance.
     (Ashwin Shankar via wang)

+    HDFS-6755. There is an unnecessary sleep in the code path where
+    DFSOutputStream#close gives up its attempt to contact the namenode
+    (mitdesai21 via cmccabe)
+
+    HDFS-6750. The DataNode should use its shared memory segment to mark
+    short-circuit replicas that have been unlinked as stale (cmccabe)
+
+    HDFS-6739. Add getDatanodeStorageReport to ClientProtocol. (szetszwo)
+
   OPTIMIZATIONS

     HDFS-6690. Deduplicate xattr names in memory. (wang)
@@ -377,6 +389,25 @@ Release 2.6.0 - UNRELEASED
     HDFS-6731. Run "hdfs zkfc-formatZK" on a server in a non-namenode will cause
     a null pointer exception. (Masatake Iwasaki via brandonli)

+    HDFS-6114. Block Scan log rolling will never happen if blocks written
+    continuously leading to huge size of dncp_block_verification.log.curr
+    (vinayakumarb via cmccabe)
+
+    HDFS-6455. NFS: Exception should be added in NFS log for invalid separator in
+    nfs.exports.allowed.hosts. (Abhiraj Butala via brandonli)
+
+    HDFS-6715. Webhdfs wont fail over when it gets java.io.IOException: Namenode
+    is in startup mode. (jing9)
+
+    HDFS-5919. FileJournalManager doesn't purge empty and corrupt inprogress edits
+    files (vinayakumarb)
+
+    HDFS-6752. Avoid Address bind errors in TestDatanodeConfig#testMemlockLimit
+    (vinayakumarb)
+
+    HDFS-6749. FSNamesystem methods should call resolvePath.
+    (Charles Lamb via cnauroth)
+
 Release 2.5.0 - UNRELEASED

   INCOMPATIBLE CHANGES
@@ -399,6 +430,15 @@ Release 2.5.0 - UNRELEASED
     HDFS-6406. Add capability for NFS gateway to reject connections from
     unprivileged ports. (atm)

+    HDFS-2006. Ability to support storing extended attributes per file.
+
+    HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API.
+    (Akira Ajisaka via wheat9)
+
+    HDFS-6278. Create HTML5-based UI for SNN. (wheat9)
+
+    HDFS-6279. Create new index page for JN / DN. (wheat9)
+
   IMPROVEMENTS

     HDFS-6007. Update documentation about short-circuit local reads (iwasakims
@@ -416,9 +456,6 @@ Release 2.5.0 - UNRELEASED

     HDFS-6158. Clean up dead code for OfflineImageViewer. (wheat9)

-    HDFS-5978. Create a tool to take fsimage and expose read-only WebHDFS API.
-    (Akira Ajisaka via wheat9)
-
     HDFS-6164. Remove lsr in OfflineImageViewer. (wheat9)

     HDFS-6167. Relocate the non-public API classes in the hdfs.client package.
@@ -446,10 +483,6 @@ Release 2.5.0 - UNRELEASED

     HDFS-6265. Prepare HDFS codebase for JUnit 4.11. (cnauroth)

-    HDFS-6278. Create HTML5-based UI for SNN. (wheat9)
-
-    HDFS-6279. Create new index page for JN / DN. (wheat9)
-
     HDFS-5693. Few NN metrics data points were collected via JMX when NN
     is under heavy load. (Ming Ma via jing9)

@@ -821,9 +854,6 @@ Release 2.5.0 - UNRELEASED
     HDFS-6464. Support multiple xattr.name parameters for WebHDFS getXAttrs.
     (Yi Liu via umamahesh)

-    HDFS-6375. Listing extended attributes with the search permission.
-    (Charles Lamb via wang)
-
     HDFS-6539. test_native_mini_dfs is skipped in hadoop-hdfs/pom.xml
     (decstery via cmccabe)

@@ -912,6 +942,27 @@ Release 2.5.0 - UNRELEASED
     HDFS-6703. NFS: Files can be deleted from a read-only mount
     (Srikanth Upputuri via brandonli)
 
 
+    HDFS-6422. getfattr in CLI doesn't throw exception or return non-0 return code 
+    when xattr doesn't exist. (Charles Lamb via umamahesh)
+
+    HDFS-6696. Name node cannot start if the path of a file under
+    construction contains ".snapshot". (wang)
+
+    HDFS-6312. WebHdfs HA failover is broken on secure clusters. 
+    (daryn via tucu)
+
+    HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes
+    from the tree and deleting them from the inode map (kihwal via cmccabe)
+
+    HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
+    via cmccabe)
+
+    HDFS-6723. New NN webUI no longer displays decommissioned state for dead node.
+    (Ming Ma via wheat9)
+
+    HDFS-6717. JIRA HDFS-5804 breaks default nfs-gateway behavior for unsecured config
+    (brandonli)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS

     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
@@ -981,15 +1032,6 @@ Release 2.5.0 - UNRELEASED
     HDFS-6492. Support create-time xattrs and atomically setting multiple
     xattrs. (wang)
 
 
-    HDFS-6312. WebHdfs HA failover is broken on secure clusters. 
-    (daryn via tucu)
-
-    HDFS-6618. FSNamesystem#delete drops the FSN lock between removing INodes
-    from the tree and deleting them from the inode map (kihwal via cmccabe)
-
-    HDFS-6622. Rename and AddBlock may race and produce invalid edits (kihwal
-    via cmccabe)
-
 Release 2.4.1 - 2014-06-23

   INCOMPATIBLE CHANGES

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java

@@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.util.DataChecksum;

 import com.google.common.annotations.VisibleForTesting;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java

@@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.UserGroupInformation;

+ 16 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -32,19 +32,21 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DATANODE_RESTART_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
@@ -60,8 +62,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CONTEXT_DEFAULT;

 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -91,7 +91,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 
 import javax.net.SocketFactory;

-import com.google.common.collect.Lists;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -112,22 +111,22 @@ import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
 import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.VolumeId;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
-import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.TcpPeerServer;
+import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
@@ -158,8 +157,8 @@ import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
-import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
+import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
@@ -175,6 +174,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
@@ -200,6 +200,7 @@ import org.apache.hadoop.util.Time;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 import com.google.common.net.InetAddresses;

 /********************************************************
@@ -2192,6 +2193,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     return namenode.getDatanodeReport(type);
   }
     
     
+  public DatanodeStorageReport[] getDatanodeStorageReport(
+      DatanodeReportType type) throws IOException {
+    return namenode.getDatanodeStorageReport(type);
+  }
+
   /**
    * Enter, leave or get safe mode.
    * 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -2136,12 +2136,12 @@ public class DFSOutputStream extends FSOutputSummer
             throw new IOException(msg);
         }
         try {
-          Thread.sleep(localTimeout);
           if (retries == 0) {
             throw new IOException("Unable to close file because the last block"
                 + " does not have enough number of replicas.");
           }
           retries--;
+          Thread.sleep(localTimeout);
           localTimeout *= 2;
           if (Time.now() - localstart > 5000) {
             DFSClient.LOG.info("Could not complete " + src + " retrying...");
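
The reordering above moves the Thread.sleep below the retry-count check, so the close loop no longer pays one extra backoff delay before giving up. A minimal standalone sketch of the resulting check-then-sleep pattern (class and method names here are illustrative, not taken from the patch):

import java.io.IOException;
import java.util.concurrent.Callable;

public class CheckThenSleepRetry {
  // Retries an action with exponential backoff; the retry budget is checked
  // and decremented before sleeping, so the final failure is reported
  // without an extra delay.
  static void retry(int retries, long timeoutMs, Callable<Boolean> attempt)
      throws Exception {
    while (!attempt.call()) {
      if (retries == 0) {
        throw new IOException("giving up after exhausting retries");
      }
      retries--;
      Thread.sleep(timeoutMs); // sleep only when another attempt will follow
      timeoutMs *= 2;          // exponential backoff, as in the loop above
    }
  }
}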

+ 11 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -24,6 +24,7 @@ import java.util.List;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -31,11 +32,10 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
-import org.apache.hadoop.fs.XAttr;
-import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.retry.AtMostOnce;
@@ -654,6 +655,13 @@ public interface ClientProtocol {
   public DatanodeInfo[] getDatanodeReport(HdfsConstants.DatanodeReportType type)
       throws IOException;
 
 
+  /**
+   * Get a report on the current datanode storages.
+   */
+  @Idempotent
+  public DatanodeStorageReport[] getDatanodeStorageReport(
+      HdfsConstants.DatanodeReportType type) throws IOException;
+
   /**
    * Get the block size for the given file.
    * @param filename The name of the file
@@ -1337,6 +1345,6 @@ public interface ClientProtocol {
    * @param xAttr <code>XAttr</code> to remove
    * @throws IOException
    */
-  @Idempotent
+  @AtMostOnce
   public void removeXAttr(String src, XAttr xAttr) throws IOException;
 }
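
For orientation, a hedged sketch of how a caller might consume the new getDatanodeStorageReport call exposed through DFSClient earlier in this commit. How the DFSClient instance is obtained, and the exact getters used on StorageReport, are assumptions for illustration rather than anything this patch prescribes:

import java.io.IOException;

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

public class StorageReportDump {
  // Prints one line per storage attached to every live datanode.
  static void dump(DFSClient client) throws IOException {
    DatanodeStorageReport[] reports =
        client.getDatanodeStorageReport(DatanodeReportType.LIVE);
    for (DatanodeStorageReport r : reports) {
      for (StorageReport s : r.getStorageReports()) {
        System.out.println(r.getDatanodeInfo().getHostName()
            + " remaining=" + s.getRemaining()
            + " capacity=" + s.getCapacity());
      }
    }
  }
}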

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java

@@ -27,7 +27,7 @@ import java.nio.channels.ReadableByteChannel;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.util.DirectBufferPool;
+import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;

 import com.google.common.base.Preconditions;

+ 18 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -72,6 +72,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Create
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateSymlinkResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DeleteSnapshotRequestProto;
@@ -93,6 +94,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDat
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -174,7 +177,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
-import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsRequestProto;
@@ -655,6 +657,21 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     }
   }
 
 
+  @Override
+  public GetDatanodeStorageReportResponseProto getDatanodeStorageReport(
+      RpcController controller, GetDatanodeStorageReportRequestProto req)
+      throws ServiceException {
+    try {
+      List<DatanodeStorageReportProto> reports = PBHelper.convertDatanodeStorageReports(
+          server.getDatanodeStorageReport(PBHelper.convert(req.getType())));
+      return GetDatanodeStorageReportResponseProto.newBuilder()
+          .addAllDatanodeStorageReports(reports)
+          .build();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
   @Override
   public GetPreferredBlockSizeResponseProto getPreferredBlockSize(
       RpcController controller, GetPreferredBlockSizeRequestProto req)

+ 16 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -94,6 +94,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCon
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto;
@@ -151,6 +152,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.ProtobufHelper;
@@ -580,6 +582,20 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
   }
 
 
+  @Override
+  public DatanodeStorageReport[] getDatanodeStorageReport(DatanodeReportType type)
+      throws IOException {
+    final GetDatanodeStorageReportRequestProto req
+        = GetDatanodeStorageReportRequestProto.newBuilder()
+            .setType(PBHelper.convert(type)).build();
+    try {
+      return PBHelper.convertDatanodeStorageReports(
+          rpcProxy.getDatanodeStorageReport(null, req).getDatanodeStorageReportsList());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
+
   @Override
   public long getPreferredBlockSize(String filename) throws IOException,
       UnresolvedLinkException {

+ 1 - 13
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

@@ -21,18 +21,13 @@ package org.apache.hadoop.hdfs.protocolPB;
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto;
@@ -51,7 +46,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReportBadBlo
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlockReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -61,14 +55,10 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
 import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RpcClientUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -137,9 +127,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
         .setRegistration(PBHelper.convert(registration))
         .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
         .setFailedVolumes(failedVolumes);
-    for (StorageReport r : reports) {
-      builder.addReports(PBHelper.convert(r));
-    }
+    builder.addAllReports(PBHelper.convertStorageReports(reports));
     if (cacheCapacity != 0) {
       builder.setCacheCapacity(cacheCapacity);
     }

+ 49 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -90,6 +90,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheP
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
@@ -102,14 +103,11 @@ import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdComma
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto.StorageState;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.FinalizeCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.KeyUpdateCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.NNHAStatusHeartbeatProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDeletedBlockInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
@@ -125,6 +123,8 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
@@ -149,6 +149,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
@@ -182,6 +183,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.JournalInfo;
 import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
@@ -620,6 +622,41 @@ public class PBHelper {
     return builder.build();
   }
 
 
+  public static DatanodeStorageReportProto convertDatanodeStorageReport(
+      DatanodeStorageReport report) {
+    return DatanodeStorageReportProto.newBuilder()
+        .setDatanodeInfo(convert(report.getDatanodeInfo()))
+        .addAllStorageReports(convertStorageReports(report.getStorageReports()))
+        .build();
+  }
+
+  public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
+      DatanodeStorageReport[] reports) {
+    final List<DatanodeStorageReportProto> protos
+        = new ArrayList<DatanodeStorageReportProto>(reports.length);
+    for(int i = 0; i < reports.length; i++) {
+      protos.add(convertDatanodeStorageReport(reports[i]));
+    }
+    return protos;
+  }
+
+  public static DatanodeStorageReport convertDatanodeStorageReport(
+      DatanodeStorageReportProto proto) {
+    return new DatanodeStorageReport(
+        convert(proto.getDatanodeInfo()),
+        convertStorageReports(proto.getStorageReportsList()));
+  }
+
+  public static DatanodeStorageReport[] convertDatanodeStorageReports(
+      List<DatanodeStorageReportProto> protos) {
+    final DatanodeStorageReport[] reports
+        = new DatanodeStorageReport[protos.size()];
+    for(int i = 0; i < reports.length; i++) {
+      reports[i] = convertDatanodeStorageReport(protos.get(i));
+    }
+    return reports;
+  }
+
   public static AdminStates convert(AdminState adminState) {
     switch(adminState) {
     case DECOMMISSION_INPROGRESS:
@@ -1717,6 +1754,15 @@ public class PBHelper {
     return report;
   }
 
 
+  public static List<StorageReportProto> convertStorageReports(StorageReport[] storages) {
+    final List<StorageReportProto> protos = new ArrayList<StorageReportProto>(
+        storages.length);
+    for(int i = 0; i < storages.length; i++) {
+      protos.add(convert(storages[i]));
+    }
+    return protos;
+  }
+
   public static JournalInfo convert(JournalInfoProto info) {
     int lv = info.hasLayoutVersion() ? info.getLayoutVersion() : 0;
     int nsID = info.hasNamespaceID() ? info.getNamespaceID() : 0;

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -259,6 +259,15 @@ public class DatanodeDescriptor extends DatanodeInfo {
     }
   }
 
 
+  public StorageReport[] getStorageReports() {
+    final StorageReport[] reports = new StorageReport[storageMap.size()];
+    final DatanodeStorageInfo[] infos = getStorageInfos();
+    for(int i = 0; i < infos.length; i++) {
+      reports[i] = infos[i].toStorageReport();
+    }
+    return reports;
+  }
+
   boolean hasStaleStorages() {
     synchronized (storageMap) {
       for (DatanodeStorageInfo storage : storageMap.values()) {

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java

@@ -291,6 +291,12 @@ public class DatanodeStorageInfo {
   public String toString() {
     return "[" + storageType + "]" + storageID + ":" + state;
   }
+  
+  StorageReport toStorageReport() {
+    return new StorageReport(
+        new DatanodeStorage(storageID, state, storageType),
+        false, capacity, dfsUsed, remaining, blockPoolUsed);
+  }
 
 
   static Iterable<StorageType> toStorageTypes(
       final Iterable<DatanodeStorageInfo> infos) {

+ 42 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -84,6 +84,10 @@ class BlockPoolSliceScanner {
   
   
   private final SortedSet<BlockScanInfo> blockInfoSet
       = new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
+  private final SortedSet<BlockScanInfo> newBlockInfoSet =
+      new TreeSet<BlockScanInfo>(BlockScanInfo.LAST_SCAN_TIME_COMPARATOR);
+
   private final GSet<Block, BlockScanInfo> blockMap
       = new LightWeightGSet<Block, BlockScanInfo>(
           LightWeightGSet.computeCapacity(0.5, "BlockMap"));
@@ -195,7 +199,7 @@ class BlockPoolSliceScanner {
       BlockScanInfo info = new BlockScanInfo( block );
       info.lastScanTime = scanTime--;
       //still keep 'info.lastScanType' to NONE.
-      addBlockInfo(info);
+      addBlockInfo(info, false);
     }

     RollingLogs rollingLogs = null;
@@ -221,25 +225,42 @@ class BlockPoolSliceScanner {
     // Should we change throttler bandwidth every time bytesLeft changes?
     // not really required.
   }
-  
-  private synchronized void addBlockInfo(BlockScanInfo info) {
-    boolean added = blockInfoSet.add(info);
+
+  /**
+   * Add the BlockScanInfo to sorted set of blockScanInfo
+   * @param info BlockScanInfo to be added
+   * @param isNewBlock true if the block is the new Block, false if
+   *          BlockScanInfo is being updated with new scanTime
+   */
+  private synchronized void addBlockInfo(BlockScanInfo info,
+      boolean isNewBlock) {
+    boolean added = false;
+    if (isNewBlock) {
+      // check whether the block already present
+      boolean exists = blockInfoSet.contains(info);
+      added = !exists && newBlockInfoSet.add(info);
+    } else {
+      added = blockInfoSet.add(info);
+    }
     blockMap.put(info);

     if (added) {
       updateBytesToScan(info.getNumBytes(), info.lastScanTime);
     }
   }
-
+
   private synchronized void delBlockInfo(BlockScanInfo info) {
     boolean exists = blockInfoSet.remove(info);
+    if (!exists){
+      exists = newBlockInfoSet.remove(info);
+    }
     blockMap.remove(info);

     if (exists) {
       updateBytesToScan(-info.getNumBytes(), info.lastScanTime);
     }
   }
-
+
   /** Update blockMap by the given LogEntry */
   private synchronized void updateBlockInfo(LogEntry e) {
     BlockScanInfo info = blockMap.get(new Block(e.blockId, 0, e.genStamp));
@@ -249,7 +270,7 @@ class BlockPoolSliceScanner {
       delBlockInfo(info);
       info.lastScanTime = e.verificationTime;
       info.lastScanType = ScanType.VERIFICATION_SCAN;
-      addBlockInfo(info);
+      addBlockInfo(info, false);
     }
   }
 
 
@@ -275,14 +296,14 @@ class BlockPoolSliceScanner {
     info = new BlockScanInfo(block.getLocalBlock());
     info.lastScanTime = getNewBlockScanTime();

-    addBlockInfo(info);
+    addBlockInfo(info, true);
     adjustThrottler();
   }

   /** Deletes the block from internal structures */
   synchronized void deleteBlock(Block block) {
     BlockScanInfo info = blockMap.get(block);
-    if ( info != null ) {
+    if (info != null) {
       delBlockInfo(info);
     }
   }
@@ -319,7 +340,7 @@ class BlockPoolSliceScanner {
     info.lastScanType = type;
     info.lastScanTime = now;
     info.lastScanOk = scanOk;
-    addBlockInfo(info);
+    addBlockInfo(info, false);

     // Don't update meta data if the verification failed.
     if (!scanOk) {
@@ -578,7 +599,7 @@ class BlockPoolSliceScanner {
           delBlockInfo(info);
           info.lastScanTime = lastScanTime;
           lastScanTime += verifyInterval;
-          addBlockInfo(info);
+          addBlockInfo(info, false);
         }
       }
     }
@@ -674,12 +695,21 @@ class BlockPoolSliceScanner {
       throw e;
     } finally {
       rollVerificationLogs();
+      rollNewBlocksInfo();
       if (LOG.isDebugEnabled()) {
         LOG.debug("Done scanning block pool: " + blockPoolId);
       }
     }
   }
-
+
+  // add new blocks to scan in next iteration
+  private synchronized void rollNewBlocksInfo() {
+    for (BlockScanInfo newBlock : newBlockInfoSet) {
+      blockInfoSet.add(newBlock);
+    }
+    newBlockInfoSet.clear();
+  }
+
   private synchronized void rollVerificationLogs() {
     if (verificationLog != null) {
       try {
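
The newBlockInfoSet changes above are the core of the HDFS-6114 fix: blocks that arrive while a scan pass is running are parked in a separate set and only merged into the main scan set when the pass rolls over, so continuous block writes can no longer keep dncp_block_verification.log.curr from rolling. A self-contained sketch of the same two-set pattern, using generic, hypothetical names rather than the scanner's own types:

import java.util.TreeSet;

public class StagedScanSet<T extends Comparable<T>> {
  private final TreeSet<T> current = new TreeSet<T>(); // entries scanned in this pass
  private final TreeSet<T> staged = new TreeSet<T>();  // entries added mid-pass

  public synchronized void add(T entry, boolean isNew) {
    if (isNew) {
      // New entries wait in the staging set until the current pass finishes.
      if (!current.contains(entry)) {
        staged.add(entry);
      }
    } else {
      current.add(entry);
    }
  }

  public synchronized void remove(T entry) {
    if (!current.remove(entry)) {
      staged.remove(entry);
    }
  }

  // Called once per completed pass, mirroring rollNewBlocksInfo() above.
  public synchronized void rollNewEntries() {
    current.addAll(staged);
    staged.clear();
  }
}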

+ 27 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java

@@ -74,7 +74,7 @@ import com.google.common.collect.HashMultimap;
  * DN also marks the block's slots as "unanchorable" to prevent additional
  * clients from initiating these operations in the future.
  * 
- * The counterpart fo this class on the client is {@link DfsClientShmManager}.
+ * The counterpart of this class on the client is {@link DfsClientShmManager}.
  */
 public class ShortCircuitRegistry {
   public static final Log LOG = LogFactory.getLog(ShortCircuitRegistry.class);
@@ -217,7 +217,32 @@ public class ShortCircuitRegistry {
     }
     return allowMunlock;
   }
-
+
+  /**
+   * Invalidate any slot associated with a blockId that we are invalidating
+   * (deleting) from this DataNode.  When a slot is invalid, the DFSClient will
+   * not use the corresponding replica for new read or mmap operations (although
+   * existing, ongoing read or mmap operations will complete.)
+   *
+   * @param blockId        The block ID.
+   */
+  public synchronized void processBlockInvalidation(ExtendedBlockId blockId) {
+    if (!enabled) return;
+    final Set<Slot> affectedSlots = slots.get(blockId);
+    if (!affectedSlots.isEmpty()) {
+      final StringBuilder bld = new StringBuilder();
+      String prefix = "";
+      bld.append("Block ").append(blockId).append(" has been invalidated.  ").
+          append("Marking short-circuit slots as invalid: ");
+      for (Slot slot : affectedSlots) {
+        slot.makeInvalid();
+        bld.append(prefix).append(slot.toString());
+        prefix = ", ";
+      }
+      LOG.info(bld.toString());
+    }
+  }
+
   public static class NewShmInfo implements Closeable {
     public final ShmId shmId;
     public final FileInputStream stream;

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -44,6 +44,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -1232,8 +1233,15 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
         }
         volumeMap.remove(bpid, invalidBlks[i]);
       }
+
+      // If a DFSClient has the replica in its cache of short-circuit file
+      // descriptors (and the client is using ShortCircuitShm), invalidate it.
+      datanode.getShortCircuitRegistry().processBlockInvalidation(
+                new ExtendedBlockId(invalidBlks[i].getBlockId(), bpid));
+
       // If the block is cached, start uncaching it.
       cacheManager.uncacheBlock(bpid, invalidBlks[i].getBlockId());
+
       // Delete the block asynchronously to make sure we can do it fast enough.
       // It's ok to unlink the block file before the uncache operation
       // finishes.

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -1074,10 +1074,11 @@ public class FSEditLog implements LogsPurgeable {
     logEdit(op);
   }

-  void logRemoveXAttrs(String src, List<XAttr> xAttrs) {
+  void logRemoveXAttrs(String src, List<XAttr> xAttrs, boolean toLogRpcIds) {
     final RemoveXAttrOp op = RemoveXAttrOp.getInstance();
     op.src = src;
     op.xAttrs = xAttrs;
+    logRpcIds(op, toLogRpcIds);
     logEdit(op);
   }
 
 

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -821,6 +821,10 @@ public class FSEditLogLoader {
       RemoveXAttrOp removeXAttrOp = (RemoveXAttrOp) op;
       fsDir.unprotectedRemoveXAttrs(removeXAttrOp.src,
           removeXAttrOp.xAttrs);
+      if (toAddRetryCache) {
+        fsNamesys.addCacheEntry(removeXAttrOp.rpcClientId,
+            removeXAttrOp.rpcCallId);
+      }
       break;
     }
     default:

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -3551,6 +3551,7 @@ public abstract class FSEditLogOp {
       XAttrEditLogProto p = XAttrEditLogProto.parseDelimitedFrom(in);
       src = p.getSrc();
       xAttrs = PBHelper.convertXAttrs(p.getXAttrsList());
+      readRpcIds(in, logVersion);
     }

     @Override
@@ -3561,18 +3562,22 @@ public abstract class FSEditLogOp {
       }
       b.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
       b.build().writeDelimitedTo(out);
+      // clientId and callId
+      writeRpcIds(rpcClientId, rpcCallId, out);
     }

     @Override
     protected void toXml(ContentHandler contentHandler) throws SAXException {
       XMLUtils.addSaxString(contentHandler, "SRC", src);
       appendXAttrsToXml(contentHandler, xAttrs);
+      appendRpcIdsToXml(contentHandler, rpcClientId, rpcCallId);
     }

     @Override
     void fromXml(Stanza st) throws InvalidXmlException {
       src = st.getValue("SRC");
       xAttrs = readXAttrsFromXml(st);
+      readRpcIdsFromXml(st);
     }
   }
   

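The three edit-log changes above (recording the RPC client/call IDs with RemoveXAttrOp and re-populating the retry cache on replay) follow the NameNode's usual retry-cache pattern; the FSNamesystem hunk further below shows the real guard verbatim. A self-contained, hedged sketch of that guard (hypothetical class, not the org.apache.hadoop.ipc.RetryCache API):

  import java.util.Map;
  import java.util.concurrent.ConcurrentHashMap;

  // Illustration only: the real code uses RetryCache.waitForCompletion/setState.
  class RetryGuardSketch {
    // key: clientId + callId of the RPC; value: whether the first attempt succeeded
    private final Map<String, Boolean> cache = new ConcurrentHashMap<>();

    void removeXAttr(String clientId, int callId, Runnable operation) {
      String key = clientId + ":" + callId;
      if (Boolean.TRUE.equals(cache.get(key))) {
        return;                  // a retried RPC gets the previous (successful) response
      }
      boolean success = false;
      try {
        operation.run();         // the real code calls removeXAttrInt(src, xAttr, ...)
        success = true;
      } finally {
        cache.put(key, success); // recorded so a retry after failover becomes a no-op
      }
    }
  }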
+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

@@ -614,6 +614,16 @@ public class FSImageFormat {
     INodeDirectory parentINode = fsDir.rootDir;
     for (long i = 0; i < numFiles; i++) {
       pathComponents = FSImageSerialization.readPathComponents(in);
+      for (int j=0; j < pathComponents.length; j++) {
+        byte[] newComponent = renameReservedComponentOnUpgrade
+            (pathComponents[j], getLayoutVersion());
+        if (!Arrays.equals(newComponent, pathComponents[j])) {
+          String oldPath = DFSUtil.byteArray2PathString(pathComponents);
+          pathComponents[j] = newComponent;
+          String newPath = DFSUtil.byteArray2PathString(pathComponents);
+          LOG.info("Renaming reserved path " + oldPath + " to " + newPath);
+        }
+      }
       final INode newNode = loadINode(
           pathComponents[pathComponents.length-1], false, in, counter);

@@ -926,6 +936,7 @@ public class FSImageFormat {
           oldnode = namesystem.dir.getInode(cons.getId()).asFile();
           inSnapshot = true;
         } else {
+          path = renameReservedPathsOnUpgrade(path, getLayoutVersion());
           final INodesInPath iip = fsDir.getLastINodeInPath(path);
           oldnode = INodeFile.valueOf(iip.getINode(0), path);
         }

+ 80 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -62,6 +62,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CAC
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY;
@@ -83,9 +85,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROU
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RANDOMIZE_BLOCK_LOCATIONS_PER_BLOCK_DEFAULT;
-
 import static org.apache.hadoop.util.Time.now;

 import java.io.BufferedWriter;
@@ -230,6 +229,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NNHAStatusHeartbeat;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -3723,8 +3723,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       StandbyException, IOException {
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         checkTraverse(pc, src);
@@ -4917,6 +4919,28 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
   }

+  DatanodeStorageReport[] getDatanodeStorageReport(final DatanodeReportType type
+      ) throws AccessControlException, StandbyException {
+    checkSuperuserPrivilege();
+    checkOperation(OperationCategory.UNCHECKED);
+    readLock();
+    try {
+      checkOperation(OperationCategory.UNCHECKED);
+      final DatanodeManager dm = getBlockManager().getDatanodeManager();      
+      final List<DatanodeDescriptor> datanodes = dm.getDatanodeListForReport(type);
+
+      DatanodeStorageReport[] reports = new DatanodeStorageReport[datanodes.size()];
+      for (int i = 0; i < reports.length; i++) {
+        final DatanodeDescriptor d = datanodes.get(i);
+        reports[i] = new DatanodeStorageReport(new DatanodeInfo(d),
+            d.getStorageReports());
+      }
+      return reports;
+    } finally {
+      readUnlock();
+    }
+  }
+
   /**
    * Save namespace image.
    * This will save current namespace into fsimage file and empty edits file.
@@ -8186,9 +8210,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     nnConf.checkAclsConfigFlag();
     FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
       checkOperation(OperationCategory.READ);
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       if (isPermissionEnabled) {
         checkPermission(pc, src, false, null, null, null, null);
       }
@@ -8282,16 +8308,19 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     nnConf.checkXAttrsConfigFlag();
     FSPermissionChecker pc = getPermissionChecker();
     boolean getAll = xAttrs == null || xAttrs.isEmpty();
-    List<XAttr> filteredXAttrs = null;
     if (!getAll) {
-      filteredXAttrs = XAttrPermissionFilter.filterXAttrsForApi(pc, xAttrs);
-      if (filteredXAttrs.isEmpty()) {
-        return filteredXAttrs;
+      try {
+        XAttrPermissionFilter.checkPermissionForApi(pc, xAttrs);
+      } catch (AccessControlException e) {
+        logAuditEvent(false, "getXAttrs", src);
+        throw e;
       }
     }
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         checkPathAccess(pc, src, FsAction.READ);
@@ -8305,15 +8334,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         if (filteredAll == null || filteredAll.isEmpty()) {
           return null;
         }
-        List<XAttr> toGet = Lists.newArrayListWithCapacity(filteredXAttrs.size());
-        for (XAttr xAttr : filteredXAttrs) {
+        List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
+        for (XAttr xAttr : xAttrs) {
+          boolean foundIt = false;
           for (XAttr a : filteredAll) {
             if (xAttr.getNameSpace() == a.getNameSpace()
                 && xAttr.getName().equals(a.getName())) {
               toGet.add(a);
+              foundIt = true;
               break;
             }
           }
+          if (!foundIt) {
+            throw new IOException(
+                "At least one of the attributes provided was not found.");
+          }
         }
         return toGet;
       }
@@ -8329,8 +8364,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     nnConf.checkXAttrsConfigFlag();
     final FSPermissionChecker pc = getPermissionChecker();
     checkOperation(OperationCategory.READ);
+    byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     readLock();
     try {
+      src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOperation(OperationCategory.READ);
       if (isPermissionEnabled) {
         /* To access xattr names, you need EXECUTE in the owning directory. */
@@ -8347,17 +8384,42 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       readUnlock();
     }
   }
-  
+
+  /**
+   * Remove an xattr for a file or directory.
+   *
+   * @param src
+   *          - path to remove the xattr from
+   * @param xAttr
+   *          - xAttr to remove
+   * @throws AccessControlException
+   * @throws SafeModeException
+   * @throws UnresolvedLinkException
+   * @throws IOException
+   */
   void removeXAttr(String src, XAttr xAttr) throws IOException {
-    nnConf.checkXAttrsConfigFlag();
-    HdfsFileStatus resultingStat = null;
-    FSPermissionChecker pc = getPermissionChecker();
+    CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
+    if (cacheEntry != null && cacheEntry.isSuccess()) {
+      return; // Return previous response
+    }
+    boolean success = false;
     try {
-      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
+      removeXAttrInt(src, xAttr, cacheEntry != null);
+      success = true;
     } catch (AccessControlException e) {
       logAuditEvent(false, "removeXAttr", src);
       throw e;
+    } finally {
+      RetryCache.setState(cacheEntry, success);
     }
+  }
+
+  void removeXAttrInt(String src, XAttr xAttr, boolean logRetryCache)
+      throws IOException {
+    nnConf.checkXAttrsConfigFlag();
+    HdfsFileStatus resultingStat = null;
+    FSPermissionChecker pc = getPermissionChecker();
+      XAttrPermissionFilter.checkPermissionForApi(pc, xAttr);
     checkOperation(OperationCategory.WRITE);
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     writeLock();
@@ -8371,12 +8433,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       xAttrs.add(xAttr);
       List<XAttr> removedXAttrs = dir.removeXAttrs(src, xAttrs);
       if (removedXAttrs != null && !removedXAttrs.isEmpty()) {
-        getEditLog().logRemoveXAttrs(src, removedXAttrs);
+        getEditLog().logRemoveXAttrs(src, removedXAttrs, logRetryCache);
+      } else {
+        throw new IOException(
+            "No matching attributes found for remove operation");
       }
       resultingStat = getAuditFileInfo(src, false);
-    } catch (AccessControlException e) {
-      logAuditEvent(false, "removeXAttr", src);
-      throw e;
     } finally {
       writeUnlock();
     }

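With the getXAttrs change above, asking for a specific attribute that does not exist (or that the caller cannot read) now surfaces as an error instead of being silently filtered out. A hedged client-side sketch, assuming the standard FileSystem#getXAttrs(Path, List<String>) API:

  import java.io.IOException;
  import java.util.Collections;
  import java.util.Map;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class GetXAttrSketch {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      try {
        Map<String, byte[]> result =
            fs.getXAttrs(new Path("/foo"), Collections.singletonList("user.nonexistent"));
        System.out.println("values: " + result.keySet());
      } catch (IOException e) {
        // After this patch the NameNode reports:
        // "At least one of the attributes provided was not found."
        System.err.println("getXAttrs failed: " + e.getMessage());
      }
    }
  }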
+ 28 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java

@@ -71,6 +71,8 @@ public class FileJournalManager implements JournalManager {
     NameNodeFile.EDITS.getName() + "_(\\d+)-(\\d+)");
   private static final Pattern EDITS_INPROGRESS_REGEX = Pattern.compile(
     NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+)");
+  private static final Pattern EDITS_INPROGRESS_STALE_REGEX = Pattern.compile(
+      NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+).*(\\S+)");
 
 
   private File currentInProgress = null;

@@ -162,8 +164,7 @@ public class FileJournalManager implements JournalManager {
       throws IOException {
     LOG.info("Purging logs older than " + minTxIdToKeep);
     File[] files = FileUtil.listFiles(sd.getCurrentDir());
-    List<EditLogFile> editLogs = 
-      FileJournalManager.matchEditLogs(files);
+    List<EditLogFile> editLogs = matchEditLogs(files, true);
     for (EditLogFile log : editLogs) {
       if (log.getFirstTxId() < minTxIdToKeep &&
           log.getLastTxId() < minTxIdToKeep) {
@@ -244,8 +245,13 @@ public class FileJournalManager implements JournalManager {
   public static List<EditLogFile> matchEditLogs(File logDir) throws IOException {
     return matchEditLogs(FileUtil.listFiles(logDir));
   }
-  
+
   static List<EditLogFile> matchEditLogs(File[] filesInStorage) {
+    return matchEditLogs(filesInStorage, false);
+  }
+
+  private static List<EditLogFile> matchEditLogs(File[] filesInStorage,
+      boolean forPurging) {
     List<EditLogFile> ret = Lists.newArrayList();
     for (File f : filesInStorage) {
       String name = f.getName();
@@ -256,6 +262,7 @@ public class FileJournalManager implements JournalManager {
           long startTxId = Long.parseLong(editsMatch.group(1));
           long endTxId = Long.parseLong(editsMatch.group(2));
           ret.add(new EditLogFile(f, startTxId, endTxId));
+          continue;
         } catch (NumberFormatException nfe) {
           LOG.error("Edits file " + f + " has improperly formatted " +
                     "transaction ID");
@@ -270,12 +277,30 @@ public class FileJournalManager implements JournalManager {
           long startTxId = Long.parseLong(inProgressEditsMatch.group(1));
           ret.add(
               new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID, true));
+          continue;
         } catch (NumberFormatException nfe) {
           LOG.error("In-progress edits file " + f + " has improperly " +
                     "formatted transaction ID");
           // skip
         }
       }
+      if (forPurging) {
+        // Check for in-progress stale edits
+        Matcher staleInprogressEditsMatch = EDITS_INPROGRESS_STALE_REGEX
+            .matcher(name);
+        if (staleInprogressEditsMatch.matches()) {
+          try {
+            long startTxId = Long.valueOf(staleInprogressEditsMatch.group(1));
+            ret.add(new EditLogFile(f, startTxId, HdfsConstants.INVALID_TXID,
+                true));
+            continue;
+          } catch (NumberFormatException nfe) {
+            LOG.error("In-progress stale edits file " + f + " has improperly "
+                + "formatted transaction ID");
+            // skip
+          }
+        }
+      }
     }
     return ret;
   }

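The new EDITS_INPROGRESS_STALE_REGEX above matches in-progress edit files that carry an extra suffix, so purging can also clean them up. A small standalone demo of the pattern (the suffixed file name below is made up for illustration):

  import java.util.regex.Matcher;
  import java.util.regex.Pattern;

  public class StaleEditsRegexDemo {
    public static void main(String[] args) {
      // Same shapes as the two patterns in FileJournalManager.
      Pattern inProgress = Pattern.compile("edits_inprogress_(\\d+)");
      Pattern stale = Pattern.compile("edits_inprogress_(\\d+).*(\\S+)");

      String leftover = "edits_inprogress_0000000000000000042.corrupt"; // hypothetical name
      System.out.println("plain regex matches: " + inProgress.matcher(leftover).matches()); // false
      Matcher m = stale.matcher(leftover);
      if (m.matches()) {
        // group(1) yields the starting transaction id, as in the new purging branch
        System.out.println("stale regex matches, startTxId=" + Long.parseLong(m.group(1)));
      }
    }
  }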
+ 14 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -115,6 +115,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
 import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
@@ -830,11 +831,23 @@ class NameNodeRpcServer implements NamenodeProtocols {
   throws IOException {
     DatanodeInfo results[] = namesystem.datanodeReport(type);
     if (results == null ) {
-      throw new IOException("Cannot find datanode report");
+      throw new IOException("Failed to get datanode report for " + type
+          + " datanodes.");
     }
     return results;
   }
     
+  @Override // ClientProtocol
+  public DatanodeStorageReport[] getDatanodeStorageReport(
+      DatanodeReportType type) throws IOException {
+    final DatanodeStorageReport[] reports = namesystem.getDatanodeStorageReport(type);
+    if (reports == null ) {
+      throw new IOException("Failed to get datanode storage report for " + type
+          + " datanodes.");
+    }
+    return reports;
+  }
+
   @Override // ClientProtocol
   public boolean setSafeMode(SafeModeAction action, boolean isChecked)
       throws IOException {

+ 15 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.security.AccessControlException;

 import com.google.common.collect.Lists;
+import com.google.common.base.Preconditions;
 
 
 /**
  * There are four types of extended attributes <XAttr> defined by the
@@ -60,8 +61,20 @@ public class XAttrPermissionFilter {
     throw new AccessControlException("User doesn't have permission for xattr: "
         + XAttrHelper.getPrefixName(xAttr));
   }
-  
-  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc, 
+
+  static void checkPermissionForApi(FSPermissionChecker pc,
+                                    List<XAttr> xAttrs) throws AccessControlException {
+    Preconditions.checkArgument(xAttrs != null);
+    if (xAttrs.isEmpty()) {
+      return;
+    }
+
+    for (XAttr xAttr : xAttrs) {
+      checkPermissionForApi(pc, xAttr);
+    }
+  }
+
+  static List<XAttr> filterXAttrsForApi(FSPermissionChecker pc,
       List<XAttr> xAttrs) {
     assert xAttrs != null : "xAttrs can not be null";
     if (xAttrs == null || xAttrs.isEmpty()) {

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -111,6 +111,7 @@ import org.apache.hadoop.hdfs.web.resources.XAttrNameParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrSetFlagParam;
 import org.apache.hadoop.hdfs.web.resources.XAttrValueParam;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.net.Node;
@@ -188,7 +189,7 @@ public class NamenodeWebHdfsMethods {
       throws IOException {
      final NamenodeProtocols np = namenode.getRpcServer();
      if (np == null) {
-       throw new IOException("Namenode is in startup mode");
+       throw new RetriableException("Namenode is in startup mode");
      }
      return np;
   }

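Throwing RetriableException instead of a plain IOException lets callers treat "Namenode is in startup mode" as transient. A hedged sketch of what a caller-side retry loop could look like (not the actual WebHDFS retry policy):

  import org.apache.hadoop.ipc.RetriableException;

  public class StartupRetrySketch {
    interface Call<T> { T run() throws Exception; }

    static <T> T callWithRetry(Call<T> call, int maxAttempts, long sleepMs) throws Exception {
      for (int attempt = 1; ; attempt++) {
        try {
          return call.run();
        } catch (RetriableException e) {
          // e.g. "Namenode is in startup mode": worth retrying after a pause
          if (attempt >= maxAttempts) {
            throw e;
          }
          Thread.sleep(sleepMs);
        }
      }
    }
  }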
+ 42 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeStorageReport.java

@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+
+/**
+ * Class captures information of a datanode and its storages.
+ */
+public class DatanodeStorageReport {
+  final DatanodeInfo datanodeInfo;
+  final StorageReport[] storageReports;
+
+  public DatanodeStorageReport(DatanodeInfo datanodeInfo,
+      StorageReport[] storageReports) {
+    this.datanodeInfo = datanodeInfo;
+    this.storageReports = storageReports;
+  }
+
+  public DatanodeInfo getDatanodeInfo() {
+    return datanodeInfo;
+  }
+
+  public StorageReport[] getStorageReports() {
+    return storageReports;
+  }
+}

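A hedged sketch of how the new per-storage report could be consumed once an array of DatanodeStorageReport comes back from the new getDatanodeStorageReport RPC (the StorageReport accessors below are assumed to be the usual capacity/remaining getters):

  import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
  import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
  import org.apache.hadoop.hdfs.server.protocol.StorageReport;

  public class StorageReportPrinter {
    // Sketch only: 'reports' would come from getDatanodeStorageReport(type).
    static void print(DatanodeStorageReport[] reports) {
      for (DatanodeStorageReport report : reports) {
        DatanodeInfo dn = report.getDatanodeInfo();
        long capacity = 0, remaining = 0;
        for (StorageReport storage : report.getStorageReports()) {
          capacity += storage.getCapacity();     // assumed accessor name
          remaining += storage.getRemaining();   // assumed accessor name
        }
        System.out.println(dn.getHostName() + ": capacity=" + capacity
            + " remaining=" + remaining);
      }
    }
  }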
+ 13 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java

@@ -32,11 +32,16 @@ import com.google.common.base.Preconditions;
  * DfsClientShm is a subclass of ShortCircuitShm which is used by the
  * DfsClient.
  * When the UNIX domain socket associated with this shared memory segment
- * closes unexpectedly, we mark the slots inside this segment as stale.
- * ShortCircuitReplica objects that contain stale slots are themselves stale,
+ * closes unexpectedly, we mark the slots inside this segment as disconnected.
+ * ShortCircuitReplica objects that contain disconnected slots are stale,
  * and will not be used to service new reads or mmap operations.
  * However, in-progress read or mmap operations will continue to proceed.
  * Once the last slot is deallocated, the segment can be safely munmapped.
+ *
+ * Slots may also become stale because the associated replica has been deleted
+ * on the DataNode.  In this case, the DataNode will clear the 'valid' bit.
+ * The client will then see these slots as stale (see
+ * #{ShortCircuitReplica#isStale}).
  */
 public class DfsClientShm extends ShortCircuitShm
     implements DomainSocketWatcher.Handler {
@@ -58,7 +63,7 @@ public class DfsClientShm extends ShortCircuitShm
    *
    * {@link DfsClientShm#handle} sets this to true.
    */
-  private boolean stale = false;
+  private boolean disconnected = false;
 
 
   DfsClientShm(ShmId shmId, FileInputStream stream, EndpointShmManager manager,
       DomainPeer peer) throws IOException {
@@ -76,14 +81,14 @@ public class DfsClientShm extends ShortCircuitShm
   }

   /**
-   * Determine if the shared memory segment is stale.
+   * Determine if the shared memory segment is disconnected from the DataNode.
    *
    * This must be called with the DfsClientShmManager lock held.
    *
    * @return   True if the shared memory segment is stale.
    */
-  public synchronized boolean isStale() {
-    return stale;
+  public synchronized boolean isDisconnected() {
+    return disconnected;
   }

   /**
@@ -97,8 +102,8 @@ public class DfsClientShm extends ShortCircuitShm
   public boolean handle(DomainSocket sock) {
     manager.unregisterShm(getShmId());
     synchronized (this) {
-      Preconditions.checkState(!stale);
-      stale = true;
+      Preconditions.checkState(!disconnected);
+      disconnected = true;
       boolean hadSlots = false;
       for (Iterator<Slot> iter = slotIterator(); iter.hasNext(); ) {
         Slot slot = iter.next();

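After this rename there are two independent reasons a cached short-circuit replica stops being usable: the whole segment is disconnected (the UNIX domain socket closed) or an individual slot was invalidated by the DataNode. A hypothetical sketch of that combined check (helper types invented for illustration; the real logic lives in ShortCircuitReplica and DfsClientShm):

  // Hypothetical stand-ins for DfsClientShm and ShortCircuitShm.Slot.
  interface ShmView  { boolean isDisconnected(); }
  interface SlotView { boolean isValid(); }

  class ReplicaStalenessSketch {
    /** Mirrors the two conditions described in the updated DfsClientShm javadoc. */
    static boolean isStale(ShmView shm, SlotView slot) {
      if (shm.isDisconnected()) {
        return true;               // socket to the DataNode went away
      }
      return !slot.isValid();      // DataNode cleared the slot's valid bit (replica deleted)
    }
  }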
+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java

@@ -271,12 +271,12 @@ public class DfsClientShmManager implements Closeable {
             loading = false;
             finishedLoading.signalAll();
           }
-          if (shm.isStale()) {
+          if (shm.isDisconnected()) {
             // If the peer closed immediately after the shared memory segment
             // was created, the DomainSocketWatcher callback might already have
-            // fired and marked the shm as stale.  In this case, we obviously
-            // don't want to add the SharedMemorySegment to our list of valid
-            // not-full segments.
+            // fired and marked the shm as disconnected.  In this case, we
+            // obviously don't want to add the SharedMemorySegment to our list
+            // of valid not-full segments.
             if (LOG.isDebugEnabled()) {
               LOG.debug(this + ": the UNIX domain socket associated with " +
                   "this short-circuit memory closed before we could make " +
@@ -299,7 +299,7 @@ public class DfsClientShmManager implements Closeable {
     void freeSlot(Slot slot) {
       DfsClientShm shm = (DfsClientShm)slot.getShm();
       shm.unregisterSlot(slot.getSlotIdx());
-      if (shm.isStale()) {
+      if (shm.isDisconnected()) {
         // Stale shared memory segments should not be tracked here.
         Preconditions.checkState(!full.containsKey(shm.getShmId()));
         Preconditions.checkState(!notFull.containsKey(shm.getShmId()));

+ 9 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java

@@ -306,6 +306,13 @@ public class ShortCircuitShm {
           (slotAddress - baseAddress) / BYTES_PER_SLOT);
     }

+    /**
+     * Clear the slot.
+     */
+    void clear() {
+      unsafe.putLongVolatile(null, this.slotAddress, 0);
+    }
+
     private boolean isSet(long flag) {
       long prev = unsafe.getLongVolatile(null, this.slotAddress);
       return (prev & flag) != 0;
@@ -535,6 +542,7 @@ public class ShortCircuitShm {
     }
     allocatedSlots.set(idx, true);
     Slot slot = new Slot(calculateSlotAddress(idx), blockId);
+    slot.clear();
     slot.makeValid();
     slots[idx] = slot;
     if (LOG.isTraceEnabled()) {
@@ -583,7 +591,7 @@ public class ShortCircuitShm {
     Slot slot = new Slot(calculateSlotAddress(slotIdx), blockId);
     if (!slot.isValid()) {
       throw new InvalidRequestException(this + ": slot " + slotIdx +
-          " has not been allocated.");
+          " is not marked as valid.");
     }
     slots[slotIdx] = slot;
     allocatedSlots.set(slotIdx, true);

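allocAndRegisterSlot now zeroes the slot word before setting the valid bit, so flag bits left behind by a previous user of the same slot index cannot leak into a new allocation. A toy analogy of that flag-word handling, using AtomicLong instead of Unsafe (the bit layout below is invented and is not the real ShortCircuitShm layout):

  import java.util.concurrent.atomic.AtomicLong;

  public class SlotWordSketch {
    static final long VALID_BIT = 1L << 63;   // invented layout, for illustration only

    private final AtomicLong word = new AtomicLong();

    void allocate() {
      word.set(0);                             // like slot.clear(): wipe any leftover bits
      word.updateAndGet(w -> w | VALID_BIT);   // like slot.makeValid(): set only the valid flag
    }

    boolean isValid() {
      return (word.get() & VALID_BIT) != 0;
    }
  }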
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/XAttrNameParam.java

@@ -25,8 +25,8 @@ public class XAttrNameParam extends StringParam {
   /** Default parameter value. **/
   public static final String DEFAULT = "";
   
-  private static Domain DOMAIN = new Domain(NAME, 
-      Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+"));
+  private static Domain DOMAIN = new Domain(NAME,
+      Pattern.compile(".*"));
   
   
   public XAttrNameParam(final String str) {
     super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);

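The WebHDFS parameter no longer enforces the namespace prefix on the client side; with ".*" every name is accepted and validation happens on the NameNode (XAttrPermissionFilter / XAttrHelper), which can then return a proper server-side error. A quick standalone comparison of the two patterns:

  import java.util.regex.Pattern;

  public class XAttrNameParamDemo {
    public static void main(String[] args) {
      Pattern oldDomain = Pattern.compile("^(user\\.|trusted\\.|system\\.|security\\.).+");
      Pattern newDomain = Pattern.compile(".*");

      String[] names = {"user.a1", "trusted.key1", "bogus"};
      for (String name : names) {
        System.out.println(name
            + " old=" + oldDomain.matcher(name).matches()    // bogus -> false
            + " new=" + newDomain.matcher(name).matches());  // everything -> true
      }
    }
  }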
+ 15 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto

@@ -281,6 +281,19 @@ message GetDatanodeReportResponseProto {
   repeated DatanodeInfoProto di = 1;
 }

+message GetDatanodeStorageReportRequestProto {
+  required DatanodeReportTypeProto type = 1;
+}
+
+message DatanodeStorageReportProto {
+  required DatanodeInfoProto datanodeInfo = 1;
+  repeated StorageReportProto storageReports = 2;
+}
+
+message GetDatanodeStorageReportResponseProto {
+  repeated DatanodeStorageReportProto datanodeStorageReports = 1;
+}
+
 message GetPreferredBlockSizeRequestProto {
   required string filename = 1;
 }
@@ -672,6 +685,8 @@ service ClientNamenodeProtocol {
   rpc getFsStats(GetFsStatusRequestProto) returns(GetFsStatsResponseProto);
   rpc getDatanodeReport(GetDatanodeReportRequestProto)
       returns(GetDatanodeReportResponseProto);
+  rpc getDatanodeStorageReport(GetDatanodeStorageReportRequestProto)
+      returns(GetDatanodeStorageReportResponseProto);
   rpc getPreferredBlockSize(GetPreferredBlockSizeRequestProto)
       returns(GetPreferredBlockSizeResponseProto);
   rpc setSafeMode(SetSafeModeRequestProto)

+ 0 - 24
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto

@@ -44,20 +44,6 @@ message DatanodeRegistrationProto {
   required string softwareVersion = 4;        // Software version of the DN, e.g. "2.0.0"
 }

-/**
- * Represents a storage available on the datanode
- */
-message DatanodeStorageProto {
-  enum StorageState {
-    NORMAL = 0;
-    READ_ONLY_SHARED = 1;
-  }
-
-  required string storageUuid = 1;
-  optional StorageState state = 2 [default = NORMAL];
-  optional StorageTypeProto storageType = 3 [default = DISK];
-}
-
 /**
  * Commands sent from namenode to the datanodes
  */
@@ -196,16 +182,6 @@ message HeartbeatRequestProto {
   optional uint64 cacheUsed = 7 [default = 0 ];
 }

-message StorageReportProto {
-  required string storageUuid = 1 [ deprecated = true ];
-  optional bool failed = 2 [ default = false ];
-  optional uint64 capacity = 3 [ default = 0 ];
-  optional uint64 dfsUsed = 4 [ default = 0 ];
-  optional uint64 remaining = 5 [ default = 0 ];
-  optional uint64 blockPoolUsed = 6 [ default = 0 ];
-  optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
-}
-
 /**
  * state - State the NN is in when returning response to the DN
  * txid - Highest transaction ID this NN has seen

+ 24 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto

@@ -99,6 +99,30 @@ message DatanodeInfoProto {
   optional uint64 cacheUsed = 12 [default = 0];
 }

+/**
+ * Represents a storage available on the datanode
+ */
+message DatanodeStorageProto {
+  enum StorageState {
+    NORMAL = 0;
+    READ_ONLY_SHARED = 1;
+  }
+
+  required string storageUuid = 1;
+  optional StorageState state = 2 [default = NORMAL];
+  optional StorageTypeProto storageType = 3 [default = DISK];
+}
+
+message StorageReportProto {
+  required string storageUuid = 1 [ deprecated = true ];
+  optional bool failed = 2 [ default = false ];
+  optional uint64 capacity = 3 [ default = 0 ];
+  optional uint64 dfsUsed = 4 [ default = 0 ];
+  optional uint64 remaining = 5 [ default = 0 ];
+  optional uint64 blockPoolUsed = 6 [ default = 0 ];
+  optional DatanodeStorageProto storage = 7; // supersedes StorageUuid
+}
+
 /**
  * Summary of a file or directory
  */

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html

@@ -66,7 +66,6 @@
 <div class="row">
   <hr />
   <div class="col-xs-2"><p>Hadoop, 2014.</p></div>
-  <div class="col-xs-1 pull-right"><a style="color: #ddd" href="dfshealth.jsp">Legacy UI</a></div>
 </div>
 </div>

@@ -283,7 +282,7 @@
   <tr class="danger">
     <td>{name} ({xferaddr})</td>
     <td>{lastContact}</td>
-    <td>Dead{?decomissioned}, Decomissioned{/decomissioned}</td>
+    <td>Dead{?decommissioned}, Decommissioned{/decommissioned}</td>
     <td>-</td>
     <td>-</td>
     <td>-</td>

+ 1 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html

@@ -18,18 +18,7 @@
     "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
     "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
 <html xmlns="http://www.w3.org/1999/xhtml">
 <html xmlns="http://www.w3.org/1999/xhtml">
 <head>
 <head>
-<meta http-equiv="REFRESH" content="1;url=dfshealth.jsp" />
+<meta http-equiv="REFRESH" content="0;url=dfshealth.html" />
 <title>Hadoop Administration</title>
 </head>
-<body>
-<script type="text/javascript">
-//<![CDATA[
-window.location.href='dfshealth.html';
-//]]>
-</script>
-<h1>Hadoop Administration</h1>
-<ul>
-<li><a href="dfshealth.jsp">DFS Health/Status</a></li>
-</ul>
-</body>
 </html>

+ 0 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/index.html

@@ -21,15 +21,4 @@
   <meta http-equiv="REFRESH" content="0;url=status.html" />
   <title>Hadoop Administration</title>
 </head>
-<body>
-<script type="text/javascript">
-//<![CDATA[
-window.location.href='status.html';
-//]]>
-</script>
-<h1>Hadoop Administration</h1>
-<ul>
-  <li><a href="status.jsp">Status</a></li>
-</ul>
-</body>
 </html>

+ 21 - 39
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm

@@ -44,10 +44,13 @@ HDFS NFS Gateway
 
 
 * {Configuration}

-   The user running the NFS-gateway must be able to proxy all the users using the NFS mounts. 
-   For instance, if user 'nfsserver' is running the gateway, and users belonging to the groups 'nfs-users1'
-   and 'nfs-users2' use the NFS mounts, then in core-site.xml of the namenode, the following must be set
-   (NOTE: replace 'nfsserver' with the user name starting the gateway in your cluster):
+   The NFS-gateway uses proxy user to proxy all the users accessing the NFS mounts. 
+   In non-secure mode, the user running the gateway is the proxy user, while in secure mode the
+   user in Kerberos keytab is the proxy user. Suppose the proxy user is 'nfsserver'
+   and users belonging to the groups 'nfs-users1'
+   and 'nfs-users2' use the NFS mounts, then in core-site.xml of the NameNode, the following
+   two properties must be set, and only the NameNode needs a restart after the configuration change
+   (NOTE: replace the string 'nfsserver' with the proxy user name in your cluster):
 
 
 ----
 <property>
@@ -72,7 +75,9 @@ HDFS NFS Gateway
 ----

    The above are the only required configuration for the NFS gateway in non-secure mode. For Kerberized
-   hadoop clusters, the following configurations need to be added to hdfs-site.xml:
+   hadoop clusters, the following configurations need to be added to hdfs-site.xml for the gateway (NOTE: replace
+   the string "nfsserver" with the proxy user name and ensure the user contained in the keytab is
+   also the same proxy user):
 
 
 ----
   <property>
@@ -87,6 +92,8 @@ HDFS NFS Gateway
     <value>nfsserver/_HOST@YOUR-REALM.COM</value>
   </property>
 ----
+  
+   The rest of the NFS gateway configurations are optional for both secure and non-secure mode.
 
 
    The AIX NFS client has a {{{https://issues.apache.org/jira/browse/HDFS-6549}few known issues}}
    that prevent it from working correctly by default with the HDFS NFS
@@ -108,7 +115,7 @@ HDFS NFS Gateway
    have been committed.

    It's strongly recommended for the users to update a few configuration properties based on their use
-   cases. All the related configuration properties can be added or updated in hdfs-site.xml.
+   cases. All the following configuration properties can be added or updated in hdfs-site.xml.
   
   
    * If the client mounts the export with access time update allowed, make sure the following 
     property is not disabled in the configuration file. Only NameNode needs to restart after 
@@ -145,36 +152,6 @@ HDFS NFS Gateway
   </property>
 ---- 

-   * For optimal performance, it is recommended that rtmax be updated to
-     1MB. However, note that this 1MB is a per client allocation, and not
-     from a shared memory pool, and therefore a larger value may adversely 
-     affect small reads, consuming a lot of memory. The maximum value of 
-     this property is 1MB.
-
-----
-<property>
-  <name>nfs.rtmax</name>
-  <value>1048576</value>
-  <description>This is the maximum size in bytes of a READ request
-    supported by the NFS gateway. If you change this, make sure you
-    also update the nfs mount's rsize(add rsize= # of bytes to the 
-    mount directive).
-  </description>
-</property>
-----
-
-----
-<property>
-  <name>nfs.wtmax</name>
-  <value>65536</value>
-  <description>This is the maximum size in bytes of a WRITE request
-    supported by the NFS gateway. If you change this, make sure you
-    also update the nfs mount's wsize(add wsize= # of bytes to the 
-    mount directive).
-  </description>
-</property>
-----
-
   * By default, the export can be mounted by any client. To better control the access,
     users can update the following property. The value string contains machine name and
     access privilege, separated by whitespace
@@ -238,8 +215,10 @@ HDFS NFS Gateway
 
 
    [[3]] Start mountd and nfsd.
    [[3]] Start mountd and nfsd.
    
    
-     No root privileges are required for this command. However, ensure that the user starting
-     the Hadoop cluster and the user starting the NFS gateway are same.
+     No root privileges are required for this command. In non-secure mode, the NFS gateway
+     should be started by the proxy user mentioned at the beginning of this user guide. 
+     While in secure mode, any user can start NFS gateway 
+     as long as the user has read access to the Kerberos keytab defined in "nfs.keytab.file".
 
 
 -------------------------
 -------------------------
      hadoop nfs3
      hadoop nfs3
@@ -339,7 +318,10 @@ HDFS NFS Gateway
 -------------------------------------------------------------------

   Then the users can access HDFS as part of the local file system except that, 
-  hard link and random write are not supported yet.
+  hard link and random write are not supported yet. To optimize the performance
+  of large file I/O, one can increase the NFS transfer size (rsize and wsize) during mount.
+  By default, the NFS gateway supports 1MB as the maximum transfer size. For larger data
+  transfer sizes, one needs to update "nfs.rtmax" and "nfs.wtmax" in hdfs-site.xml.
 
 
 * {Allow mounts from unprivileged clients}


+ 69 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -2653,6 +2653,75 @@ public class TestDFSShell {
     }
   }

+  /*
+   * 1. Test that CLI throws an exception and returns non-0 when user does
+   * not have permission to read an xattr.
+   * 2. Test that CLI throws an exception and returns non-0 when a non-existent
+   * xattr is requested.
+   */
+  @Test (timeout = 120000)
+  public void testGetFAttrErrors() throws Exception {
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    MiniDFSCluster cluster = null;
+    PrintStream bakErr = null;
+    try {
+      final Configuration conf = new HdfsConfiguration();
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+
+      final FileSystem fs = cluster.getFileSystem();
+      final Path p = new Path("/foo");
+      fs.mkdirs(p);
+      bakErr = System.err;
+
+      final FsShell fshell = new FsShell(conf);
+      final ByteArrayOutputStream out = new ByteArrayOutputStream();
+      System.setErr(new PrintStream(out));
+
+      // No permission for "other".
+      fs.setPermission(p, new FsPermission((short) 0700));
+
+      {
+        final int ret = ToolRunner.run(fshell, new String[] {
+            "-setfattr", "-n", "user.a1", "-v", "1234", "/foo"});
+        assertEquals("Returned should be 0", 0, ret);
+        out.reset();
+      }
+
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            int ret = ToolRunner.run(fshell, new String[] {
+                "-getfattr", "-n", "user.a1", "/foo"});
+            String str = out.toString();
+            assertTrue("xattr value was incorrectly returned",
+                str.indexOf("1234") == -1);
+            out.reset();
+            return null;
+          }
+        });
+
+      {
+        final int ret = ToolRunner.run(fshell, new String[]{
+            "-getfattr", "-n", "user.nonexistent", "/foo"});
+        String str = out.toString();
+        assertTrue("xattr value was incorrectly returned",
+          str.indexOf(
+              "getfattr: At least one of the attributes provided was not found")
+               >= 0);
+        out.reset();
+      }
+    } finally {
+      if (bakErr != null) {
+        System.setErr(bakErr);
+      }
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Test that the server trash configuration is respected when
    * the client configuration is not set.

+ 137 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

@@ -70,6 +70,9 @@ public class TestDFSUpgradeFromImage {
   private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
   private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
   private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
+  private static final String HADOOP1_RESERVED_IMAGE = "hadoop-1-reserved.tgz";
+  private static final String HADOOP023_RESERVED_IMAGE =
+      "hadoop-0.23-reserved.tgz";
   private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz";

   private static class ReferenceFileInfo {
@@ -325,6 +328,140 @@ public class TestDFSUpgradeFromImage {
     }
   }

+  /**
+   * Test upgrade from a branch-1.2 image with reserved paths
+   */
+  @Test
+  public void testUpgradeFromRel1ReservedImage() throws Exception {
+    unpackStorage(HADOOP1_RESERVED_IMAGE);
+    MiniDFSCluster cluster = null;
+    // Try it once without setting the upgrade flag to ensure it fails
+    final Configuration conf = new Configuration();
+    // Try it again with a custom rename string
+    try {
+      FSImageFormat.setRenameReservedPairs(
+          ".snapshot=.user-snapshot," +
+              ".reserved=.my-reserved");
+      cluster =
+          new MiniDFSCluster.Builder(conf)
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // Make sure the paths were renamed as expected
+      // Also check that paths are present after a restart, checks that the
+      // upgraded fsimage has the same state.
+      final String[] expected = new String[] {
+          "/.my-reserved",
+          "/.user-snapshot",
+          "/.user-snapshot/.user-snapshot",
+          "/.user-snapshot/open",
+          "/dir1",
+          "/dir1/.user-snapshot",
+          "/dir2",
+          "/dir2/.user-snapshot",
+          "/user",
+          "/user/andrew",
+          "/user/andrew/.user-snapshot",
+      };
+      for (int i=0; i<2; i++) {
+        // Restart the second time through this loop
+        if (i==1) {
+          cluster.finalizeCluster(conf);
+          cluster.restartNameNode(true);
+        }
+        ArrayList<Path> toList = new ArrayList<Path>();
+        toList.add(new Path("/"));
+        ArrayList<String> found = new ArrayList<String>();
+        while (!toList.isEmpty()) {
+          Path p = toList.remove(0);
+          FileStatus[] statuses = dfs.listStatus(p);
+          for (FileStatus status: statuses) {
+            final String path = status.getPath().toUri().getPath();
+            System.out.println("Found path " + path);
+            found.add(path);
+            if (status.isDirectory()) {
+              toList.add(status.getPath());
+            }
+          }
+        }
+        for (String s: expected) {
+          assertTrue("Did not find expected path " + s, found.contains(s));
+        }
+        assertEquals("Found an unexpected path while listing filesystem",
+            found.size(), expected.length);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test upgrade from a 0.23.11 image with reserved paths
+   */
+  @Test
+  public void testUpgradeFromRel023ReservedImage() throws Exception {
+    unpackStorage(HADOOP023_RESERVED_IMAGE);
+    MiniDFSCluster cluster = null;
+    // Try it once without setting the upgrade flag to ensure it fails
+    final Configuration conf = new Configuration();
+    // Try it again with a custom rename string
+    try {
+      FSImageFormat.setRenameReservedPairs(
+          ".snapshot=.user-snapshot," +
+              ".reserved=.my-reserved");
+      cluster =
+          new MiniDFSCluster.Builder(conf)
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      // Make sure the paths were renamed as expected
+      // Also check that paths are present after a restart, checks that the
+      // upgraded fsimage has the same state.
+      final String[] expected = new String[] {
+          "/.user-snapshot",
+          "/dir1",
+          "/dir1/.user-snapshot",
+          "/dir2",
+          "/dir2/.user-snapshot"
+      };
+      for (int i=0; i<2; i++) {
+        // Restart the second time through this loop
+        if (i==1) {
+          cluster.finalizeCluster(conf);
+          cluster.restartNameNode(true);
+        }
+        ArrayList<Path> toList = new ArrayList<Path>();
+        toList.add(new Path("/"));
+        ArrayList<String> found = new ArrayList<String>();
+        while (!toList.isEmpty()) {
+          Path p = toList.remove(0);
+          FileStatus[] statuses = dfs.listStatus(p);
+          for (FileStatus status: statuses) {
+            final String path = status.getPath().toUri().getPath();
+            System.out.println("Found path " + path);
+            found.add(path);
+            if (status.isDirectory()) {
+              toList.add(status.getPath());
+            }
+          }
+        }
+        for (String s: expected) {
+          assertTrue("Did not find expected path " + s, found.contains(s));
+        }
+        assertEquals("Found an unexpected path while listing filesystem",
+            found.size(), expected.length);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
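Both reserved-path upgrade tests added above repeat the same breadth-first listing check. A minimal helper sketch that the loop could be factored into (the name assertExpectedPaths is illustrative, not part of the patch):

    static void assertExpectedPaths(DistributedFileSystem dfs, String[] expected)
        throws IOException {
      final List<Path> toList = new ArrayList<Path>();
      toList.add(new Path("/"));
      final List<String> found = new ArrayList<String>();
      while (!toList.isEmpty()) {
        final Path p = toList.remove(0);
        for (FileStatus status : dfs.listStatus(p)) {
          found.add(status.getPath().toUri().getPath());
          if (status.isDirectory()) {
            toList.add(status.getPath());
          }
        }
      }
      for (String s : expected) {
        assertTrue("Did not find expected path " + s, found.contains(s));
      }
      assertEquals("Found an unexpected path while listing filesystem",
          expected.length, found.size());
    }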
  /**
   * Test upgrade from 2.0 image with a variety of .snapshot and .reserved
   * paths to test renaming on upgrade

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java

@@ -53,6 +53,8 @@ public class TestDatanodeConfig {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY, 0);
     conf.set(DFSConfigKeys.DFS_DATANODE_ADDRESS_KEY, "localhost:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, "localhost:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_HTTP_ADDRESS_KEY, "localhost:0");
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
     cluster.waitActive();
   }

+ 66 - 24
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java

@@ -21,19 +21,26 @@ import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
 
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Comparator;
+import java.util.List;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.junit.Test;
 
 /**
  * This test ensures the all types of data node report work correctly.
  */
 public class TestDatanodeReport {
+  static final Log LOG = LogFactory.getLog(TestDatanodeReport.class);
   final static private Configuration conf = new HdfsConfiguration();
   final static private int NUM_OF_DATANODES = 4;
     
@@ -50,20 +57,18 @@ public class TestDatanodeReport {
     try {
       //wait until the cluster is up
       cluster.waitActive();
+      final String bpid = cluster.getNamesystem().getBlockPoolId();
+      final List<DataNode> datanodes = cluster.getDataNodes();
+      final DFSClient client = cluster.getFileSystem().dfs;
 
 
-      InetSocketAddress addr = new InetSocketAddress("localhost",
-          cluster.getNameNodePort());
-      DFSClient client = new DFSClient(addr, conf);
-
-      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
-                   NUM_OF_DATANODES);
-      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
-                   NUM_OF_DATANODES);
-      assertEquals(client.datanodeReport(DatanodeReportType.DEAD).length, 0);
+      assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, bpid);
+      assertReports(NUM_OF_DATANODES, DatanodeReportType.LIVE, client, datanodes, bpid);
+      assertReports(0, DatanodeReportType.DEAD, client, datanodes, bpid);
 
 
       // bring down one datanode
-      ArrayList<DataNode> datanodes = cluster.getDataNodes();
-      datanodes.remove(datanodes.size()-1).shutdown();
+      final DataNode last = datanodes.get(datanodes.size() - 1);
+      LOG.info("XXX shutdown datanode " + last.getDatanodeUuid());
+      last.shutdown();
 
 
       DatanodeInfo[] nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
       while (nodeInfo.length != 1) {
@@ -74,22 +79,59 @@
         nodeInfo = client.datanodeReport(DatanodeReportType.DEAD);
       }
 
-      assertEquals(client.datanodeReport(DatanodeReportType.LIVE).length,
-                   NUM_OF_DATANODES-1);
-      assertEquals(client.datanodeReport(DatanodeReportType.ALL).length,
-                   NUM_OF_DATANODES);
+      assertReports(NUM_OF_DATANODES, DatanodeReportType.ALL, client, datanodes, null);
+      assertReports(NUM_OF_DATANODES - 1, DatanodeReportType.LIVE, client, datanodes, null);
+      assertReports(1, DatanodeReportType.DEAD, client, datanodes, null);
 
 
       Thread.sleep(5000);
       assertGauge("ExpiredHeartbeats", 1, getMetrics("FSNamesystem"));
-    }finally {
+    } finally {
       cluster.shutdown();
     }
   }
- 
-  public static void main(String[] args) throws Exception {
-    new TestDatanodeReport().testDatanodeReport();
-  }
   
   
-}
-
+  final static Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
+    @Override
+    public int compare(StorageReport left, StorageReport right) {
+      return left.getStorage().getStorageID().compareTo(
+            right.getStorage().getStorageID());
+    }
+  };
 
 
+  static void assertReports(int numDatanodes, DatanodeReportType type,
+      DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
+    final DatanodeInfo[] infos = client.datanodeReport(type);
+    assertEquals(numDatanodes, infos.length);
+    final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
+    assertEquals(numDatanodes, reports.length);
+    
+    for(int i = 0; i < infos.length; i++) {
+      assertEquals(infos[i], reports[i].getDatanodeInfo());
+      
+      final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
+      if (bpid != null) {
+        //check storage
+        final StorageReport[] computed = reports[i].getStorageReports();
+        Arrays.sort(computed, CMP);
+        final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
+        Arrays.sort(expected, CMP);
+  
+        assertEquals(expected.length, computed.length);
+        for(int j = 0; j < expected.length; j++) {
+          assertEquals(expected[j].getStorage().getStorageID(),
+                       computed[j].getStorage().getStorageID());
+        }
+      }
+    }
+  }
+  
+  static DataNode findDatanode(String id, List<DataNode> datanodes) {
+    for(DataNode d : datanodes) {
+      if (d.getDatanodeUuid().equals(id)) {
+        return d;
+      }
+    }
+    throw new IllegalStateException("Datanode " + id + " not in datanode list: "
+        + datanodes);
+  }
+}
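The rewritten test exercises the new getDatanodeStorageReport call alongside the existing datanodeReport. A minimal usage sketch under the same assumptions as the test (a running MiniDFSCluster and its DFSClient):

    // Sketch only: client is the DFSClient of an already-running cluster.
    final DatanodeStorageReport[] reports =
        client.getDatanodeStorageReport(DatanodeReportType.LIVE);
    for (DatanodeStorageReport r : reports) {
      System.out.println(r.getDatanodeInfo());
      for (StorageReport s : r.getStorageReports()) {
        System.out.println("  " + s.getStorage().getStorageID()
            + " remaining=" + s.getRemaining());
      }
    }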

+ 13 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java

@@ -31,25 +31,25 @@ import org.apache.hadoop.fs.permission.AclEntryScope;
 import org.apache.hadoop.fs.permission.AclEntryType;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.StorageType;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeRegistrationProto;
-import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeStorageProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlocksWithLocationsProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CheckpointSignatureProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExportedBlockKeysProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
@@ -67,9 +67,18 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
-import org.apache.hadoop.hdfs.server.protocol.*;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
+import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;

+ 330 - 21
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.FileNotFoundException;
 import java.security.PrivilegedExceptionAction;
 import java.util.EnumSet;
 import java.util.List;
@@ -46,6 +45,7 @@ import static org.apache.hadoop.fs.permission.AclEntryType.USER;
 import static org.apache.hadoop.fs.permission.FsAction.ALL;
 import static org.apache.hadoop.fs.permission.FsAction.READ;
 import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import org.junit.After;
@@ -261,11 +261,12 @@ public class FSXAttrBaseTest {
       fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE, 
           XAttrSetFlag.REPLACE));
       Assert.fail("Setting xattr with empty name should fail.");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
     } catch (HadoopIllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
-    } catch (IllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains("Invalid value: \"user.\" does " + 
-          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
     }
     
     // Set xattr with invalid name: "a1"
@@ -274,11 +275,12 @@ public class FSXAttrBaseTest {
           XAttrSetFlag.REPLACE));
       Assert.fail("Setting xattr with invalid name prefix or without " +
           "name prefix should fail.");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
     } catch (HadoopIllegalArgumentException e) {
       GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
-    } catch (IllegalArgumentException e) {
-      GenericTestUtils.assertExceptionContains("Invalid value: \"a1\" does " + 
-          "not belong to the domain ^(user\\.|trusted\\.|system\\.|security\\.).+", e);
     }
     
     // Set xattr without XAttrSetFlag
@@ -341,9 +343,18 @@ public class FSXAttrBaseTest {
   }
   
   /**
-   * Tests for getting xattr
-   * 1. To get xattr which does not exist.
-   * 2. To get multiple xattrs.
+   * getxattr tests. Test that getxattr throws an exception if any of
+   * the following are true:
+   * an xattr that was requested doesn't exist
+   * the caller specifies an unknown namespace
+   * the caller doesn't have access to the namespace
+   * the caller doesn't have permission to get the value of the xattr
+   * the caller does not have search access to the parent directory
+   * the caller has only read access to the owning directory
+   * the caller has only search access to the owning directory and
+   * execute/search access to the actual entity
+   * the caller does not have search access to the owning directory and read
+   * access to the actual entity
    */
   @Test(timeout = 120000)
   public void testGetXAttrs() throws Exception {
@@ -351,21 +362,159 @@ public class FSXAttrBaseTest {
     fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
     fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
     
-    // XAttr does not exist.
-    byte[] value = fs.getXAttr(path, name3);
-    Assert.assertEquals(value, null);
+    /* An XAttr that was requested does not exist. */
+    try {
+      final byte[] value = fs.getXAttr(path, name3);
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains(
+          "At least one of the attributes provided was not found.", e);
+    }
     
     
-    List<String> names = Lists.newArrayList();
-    names.add(name1);
-    names.add(name2);
-    names.add(name3);
-    Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
-    Assert.assertEquals(xattrs.size(), 2);
-    Assert.assertArrayEquals(value1, xattrs.get(name1));
-    Assert.assertArrayEquals(value2, xattrs.get(name2));
+    /* Throw an exception if an xattr that was requested does not exist. */
+    {
+      final List<String> names = Lists.newArrayList();
+      names.add(name1);
+      names.add(name2);
+      names.add(name3);
+      try {
+        final Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
+        Assert.fail("expected IOException");
+      } catch (IOException e) {
+        GenericTestUtils.assertExceptionContains(
+            "At least one of the attributes provided was not found.", e);
+      }
+    }
     
     
     fs.removeXAttr(path, name1);
     fs.removeXAttr(path, name2);
+
+    /* Unknown namespace should throw an exception. */
+    try {
+      final byte[] xattr = fs.getXAttr(path, "wackynamespace.foo");
+      Assert.fail("expected IOException");
+    } catch (Exception e) {
+      GenericTestUtils.assertExceptionContains
+          ("An XAttr name must be prefixed with user/trusted/security/system, " +
+           "followed by a '.'",
+          e);
+    }
+
+    /*
+     * The 'trusted' namespace should not be accessible and should throw an
+     * exception.
+     */
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(path, "trusted.foo");
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+    }
+
+    fs.setXAttr(path, name1, "1234".getBytes());
+
+    /*
+     * Test that an exception is thrown if the caller doesn't have permission to
+     * get the value of the xattr.
+     */
+
+    /* Set access so that only the owner has access. */
+    fs.setPermission(path, new FsPermission((short) 0700));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(path, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * The caller must have search access to the parent directory.
+     */
+    final Path childDir = new Path(path, "child" + pathCount);
+    /* Set access to parent so that only the owner has access. */
+    FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+    fs.setXAttr(childDir, name1, "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /* Check that read access to the owning directory is not good enough. */
+    fs.setPermission(path, new FsPermission((short) 0704));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that search access to the owning directory and search/execute
+     * access to the actual entity with extended attributes is not good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0701));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            final byte[] xattr = userFs.getXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that search access to the owning directory and read access to
+     * the actual entity with the extended attribute is good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0704));
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          final byte[] xattr = userFs.getXAttr(childDir, name1);
+          return null;
+        }
+      });
   }
   
   /**
@@ -402,6 +551,166 @@ public class FSXAttrBaseTest {
     fs.removeXAttr(path, name3);
   }
 
 
+  /**
+   * removexattr tests. Test that removexattr throws an exception if any of
+   * the following are true:
+   * an xattr that was requested doesn't exist
+   * the caller specifies an unknown namespace
+   * the caller doesn't have access to the namespace
+   * the caller doesn't have permission to get the value of the xattr
+   * the caller does not have "execute" (scan) access to the parent directory
+   * the caller has only read access to the owning directory
+   * the caller has only execute access to the owning directory and execute
+   * access to the actual entity
+   * the caller does not have execute access to the owning directory and write
+   * access to the actual entity
+   */
+  @Test(timeout = 120000)
+  public void testRemoveXAttrPermissions() throws Exception {
+    FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
+    fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
+    fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
+
+    try {
+      fs.removeXAttr(path, name2);
+      fs.removeXAttr(path, name2);
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("No matching attributes found", e);
+    }
+
+    /* Unknown namespace should throw an exception. */
+    final String expectedExceptionString = "An XAttr name must be prefixed " +
+        "with user/trusted/security/system, followed by a '.'";
+    try {
+      fs.removeXAttr(path, "wackynamespace.foo");
+      Assert.fail("expected IOException");
+    } catch (RemoteException e) {
+      assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
+          HadoopIllegalArgumentException.class.getCanonicalName());
+      GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+    } catch (HadoopIllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
+    }
+
+    /*
+     * The 'trusted' namespace should not be accessible and should throw an
+     * exception.
+     */
+    final UserGroupInformation user = UserGroupInformation.
+        createUserForTesting("user", new String[] {"mygroup"});
+    fs.setXAttr(path, "trusted.foo", "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(path, "trusted.foo");
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
+    } finally {
+      fs.removeXAttr(path, "trusted.foo");
+    }
+
+    /*
+     * Test that an exception is thrown if the caller doesn't have permission to
+     * get the value of the xattr.
+     */
+
+    /* Set access so that only the owner has access. */
+    fs.setPermission(path, new FsPermission((short) 0700));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(path, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * The caller must have "execute" (scan) access to the parent directory.
+     */
+    final Path childDir = new Path(path, "child" + pathCount);
+    /* Set access to parent so that only the owner has access. */
+    FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
+    fs.setXAttr(childDir, name1, "1234".getBytes());
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /* Check that read access to the owning directory is not good enough. */
+    fs.setPermission(path, new FsPermission((short) 0704));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that execute access to the owning directory and scan access to
+     * the actual entity with extended attributes is not good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0701));
+    try {
+      user.doAs(new PrivilegedExceptionAction<Object>() {
+          @Override
+          public Object run() throws Exception {
+            final FileSystem userFs = dfsCluster.getFileSystem();
+            userFs.removeXAttr(childDir, name1);
+            return null;
+          }
+        });
+      Assert.fail("expected IOException");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Permission denied", e);
+    }
+
+    /*
+     * Check that execute access to the owning directory and write access to
+     * the actual entity with extended attributes is good enough.
+     */
+    fs.setPermission(path, new FsPermission((short) 0701));
+    fs.setPermission(childDir, new FsPermission((short) 0706));
+    user.doAs(new PrivilegedExceptionAction<Object>() {
+        @Override
+        public Object run() throws Exception {
+          final FileSystem userFs = dfsCluster.getFileSystem();
+          userFs.removeXAttr(childDir, name1);
+          return null;
+        }
+      });
+  }
+
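The doAs-then-expect-failure pattern recurs throughout the two permission tests above. A compact helper sketch (hypothetical, not part of the patch) that captures it:

    // Hypothetical helper: run the action as the unprivileged user and assert
    // that it is rejected with a "Permission denied" error.
    private static void expectPermissionDenied(UserGroupInformation user,
        PrivilegedExceptionAction<Object> action) throws Exception {
      try {
        user.doAs(action);
        Assert.fail("expected IOException");
      } catch (IOException e) {
        GenericTestUtils.assertExceptionContains("Permission denied", e);
      }
    }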
   @Test(timeout = 120000)
   public void testRenameFileWithXAttr() throws Exception {
     FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java

@@ -546,6 +546,7 @@ public class TestINodeFile {
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
         DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     MiniDFSCluster cluster = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
@@ -593,6 +594,19 @@ public class TestINodeFile {
       // ClientProtocol#getPreferredBlockSize
       assertEquals(testFileBlockSize,
           nnRpc.getPreferredBlockSize(testFileInodePath.toString()));
+
+      /*
+       * HDFS-6749 added missing calls to FSDirectory.resolvePath in the
+       * following four methods. The calls below ensure that
+       * /.reserved/.inodes paths work properly. No need to check return
+       * values as these methods are tested elsewhere.
+       */
+      {
+        fs.isFileClosed(testFileInodePath);
+        fs.getAclStatus(testFileInodePath);
+        fs.getXAttrs(testFileInodePath);
+        fs.listXAttrs(testFileInodePath);
+      }
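For context on the inode paths used here: HDFS exposes every inode under the reserved prefix /.reserved/.inodes/<inode-id>, so the calls above resolve the test file by id rather than by name. A sketch of how such a path is formed (fileId is assumed to have been obtained elsewhere, e.g. from an HdfsFileStatus):

    // Illustrative only; testFileInodePath in the test is built along these lines.
    final Path testFileInodePath = new Path("/.reserved/.inodes/" + fileId);
    fs.getAclStatus(testFileInodePath);  // resolves to the same file as its regular path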
       
       
       // symbolic link related tests
       
       

+ 9 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java

@@ -212,18 +212,25 @@ public class TestNNStorageRetentionManager {
     tc.addImage("/foo1/current/" + getImageFileName(300), false);
     tc.addImage("/foo1/current/" + getImageFileName(400), false);
 
+    // Segments containing txns upto txId 250 are extra and should be purged.
     tc.addLog("/foo2/current/" + getFinalizedEditsFileName(1, 100), true);
-    // Without lowering the max segments to retain, we'd retain all segments
-    // going back to txid 150 (300 - 150).
     tc.addLog("/foo2/current/" + getFinalizedEditsFileName(101, 175), true);
+    tc.addLog("/foo2/current/" + getInProgressEditsFileName(176) + ".empty",
+        true);
     tc.addLog("/foo2/current/" + getFinalizedEditsFileName(176, 200), true);
     tc.addLog("/foo2/current/" + getFinalizedEditsFileName(201, 225), true);
+    tc.addLog("/foo2/current/" + getInProgressEditsFileName(226) + ".corrupt",
+        true);
     tc.addLog("/foo2/current/" + getFinalizedEditsFileName(226, 240), true);
     // Only retain 2 extra segments. The 301-350 and 351-400 segments are
     // considered required, not extra.
     tc.addLog("/foo2/current/" + getFinalizedEditsFileName(241, 275), false);
     tc.addLog("/foo2/current/" + getFinalizedEditsFileName(276, 300), false);
+    tc.addLog("/foo2/current/" + getInProgressEditsFileName(301) + ".empty",
+        false);
     tc.addLog("/foo2/current/" + getFinalizedEditsFileName(301, 350), false);
+    tc.addLog("/foo2/current/" + getInProgressEditsFileName(351) + ".corrupt",
+        false);
     tc.addLog("/foo2/current/" + getFinalizedEditsFileName(351, 400), false);
     tc.addLog("/foo2/current/" + getInProgressEditsFileName(401), false);
     runTest(tc);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java

@@ -415,7 +415,7 @@ public class TestNamenodeRetryCache {
     
     
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -434,7 +434,7 @@ public class TestNamenodeRetryCache {
     assertTrue(namesystem.hasRetryCache());
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
         .getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();

+ 52 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java

@@ -160,7 +160,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn0 = cluster.getNamesystem(0);
     LightWeightCache<CacheEntry, CacheEntry> cacheSet = 
         (LightWeightCache<CacheEntry, CacheEntry>) fsn0.getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     
     Map<CacheEntry, CacheEntry> oldEntries = 
         new HashMap<CacheEntry, CacheEntry>();
@@ -181,7 +181,7 @@ public class TestRetryCacheWithHA {
     FSNamesystem fsn1 = cluster.getNamesystem(1);
     cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) fsn1
         .getRetryCache().getCacheSet();
-    assertEquals(22, cacheSet.size());
+    assertEquals(23, cacheSet.size());
     iter = cacheSet.iterator();
     while (iter.hasNext()) {
       CacheEntry entry = iter.next();
@@ -1047,6 +1047,49 @@ public class TestRetryCacheWithHA {
     }
   }
 
 
+  /** removeXAttr */
+  class RemoveXAttrOp extends AtMostOnceOp {
+    private final String src;
+
+    RemoveXAttrOp(DFSClient client, String src) {
+      super("removeXAttr", client);
+      this.src = src;
+    }
+
+    @Override
+    void prepare() throws Exception {
+      Path p = new Path(src);
+      if (!dfs.exists(p)) {
+        DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0);
+        client.setXAttr(src, "user.key", "value".getBytes(),
+          EnumSet.of(XAttrSetFlag.CREATE));
+      }
+    }
+
+    @Override
+    void invoke() throws Exception {
+      client.removeXAttr(src, "user.key");
+    }
+
+    @Override
+    boolean checkNamenodeBeforeReturn() throws Exception {
+      for (int i = 0; i < CHECKTIMES; i++) {
+        Map<String, byte[]> iter = dfs.getXAttrs(new Path(src));
+        Set<String> keySet = iter.keySet();
+        if (!keySet.contains("user.key")) {
+          return true;
+        }
+        Thread.sleep(1000);
+      }
+      return false;
+    }
+
+    @Override
+    Object getResult() {
+      return null;
+    }
+  }
+
   @Test (timeout=60000)
   public void testCreateSnapshot() throws Exception {
     final DFSClient client = genClientWithDummyHandler();
@@ -1183,6 +1226,13 @@ public class TestRetryCacheWithHA {
     testClientRetryWithFailover(op);
   }
 
 
+  @Test (timeout=60000)
+  public void testRemoveXAttr() throws Exception {
+    DFSClient client = genClientWithDummyHandler();
+    AtMostOnceOp op = new RemoveXAttrOp(client, "/removexattr");
+    testClientRetryWithFailover(op);
+  }
+
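The retry-cache size assertions earlier in this file move from 22 to 23 because removeXAttr is now tracked as a retriable, at-most-once operation. A sketch of the server-side contract this test assumes (annotation style as used on ClientProtocol methods; shown here for illustration only):

    @AtMostOnce
    void removeXAttr(String src, XAttr xAttr) throws IOException;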
   /**
    * When NN failover happens, if the client did not receive the response and
    * send a retry request to the other NN, the same response should be recieved

+ 98 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java

@@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY;
 import static org.hamcrest.CoreMatchers.equalTo;
 
 import java.io.DataOutputStream;
@@ -30,7 +31,9 @@ import java.io.File;
 import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.Map;
 
 import org.apache.commons.lang.mutable.MutableBoolean;
@@ -462,6 +465,7 @@ public class TestShortCircuitCache {
       }
     }, 10, 60000);
     cluster.shutdown();
+    sockDir.close();
   }
 
   @Test(timeout=60000)
@@ -516,4 +520,98 @@ public class TestShortCircuitCache {
     });
     cluster.shutdown();
   }
+
+  /**
+   * Test unlinking a file whose blocks we are caching in the DFSClient.
+   * The DataNode will notify the DFSClient that the replica is stale via the
+   * ShortCircuitShm.
+   */
+  @Test(timeout=60000)
+  public void testUnlinkingReplicasInFileDescriptorCache() throws Exception {
+    BlockReaderTestUtil.enableShortCircuitShmTracing();
+    TemporarySocketDirectory sockDir = new TemporarySocketDirectory();
+    Configuration conf = createShortCircuitConf(
+        "testUnlinkingReplicasInFileDescriptorCache", sockDir);
+    // We don't want the CacheCleaner to time out short-circuit shared memory
+    // segments during the test, so set the timeout really high.
+    conf.setLong(DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
+        1000000000L);
+    MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+    DistributedFileSystem fs = cluster.getFileSystem();
+    final ShortCircuitCache cache =
+        fs.getClient().getClientContext().getShortCircuitCache();
+    cache.getDfsClientShmManager().visit(new Visitor() {
+      @Override
+      public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
+          throws IOException {
+        // The ClientShmManager starts off empty.
+        Assert.assertEquals(0,  info.size());
+      }
+    });
+    final Path TEST_PATH = new Path("/test_file");
+    final int TEST_FILE_LEN = 8193;
+    final int SEED = 0xFADE0;
+    DFSTestUtil.createFile(fs, TEST_PATH, TEST_FILE_LEN,
+        (short)1, SEED);
+    byte contents[] = DFSTestUtil.readFileBuffer(fs, TEST_PATH);
+    byte expected[] = DFSTestUtil.
+        calculateFileContentsFromSeed(SEED, TEST_FILE_LEN);
+    Assert.assertTrue(Arrays.equals(contents, expected));
+    // Loading this file brought the ShortCircuitReplica into our local
+    // replica cache.
+    final DatanodeInfo datanode =
+        new DatanodeInfo(cluster.getDataNodes().get(0).getDatanodeId());
+    cache.getDfsClientShmManager().visit(new Visitor() {
+      @Override
+      public void visit(HashMap<DatanodeInfo, PerDatanodeVisitorInfo> info)
+          throws IOException {
+        Assert.assertTrue(info.get(datanode).full.isEmpty());
+        Assert.assertFalse(info.get(datanode).disabled);
+        Assert.assertEquals(1, info.get(datanode).notFull.values().size());
+        DfsClientShm shm =
+            info.get(datanode).notFull.values().iterator().next();
+        Assert.assertFalse(shm.isDisconnected());
+      }
+    });
+    // Remove the file whose blocks we just read.
+    fs.delete(TEST_PATH, false);
+
+    // Wait for the replica to be purged from the DFSClient's cache.
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      MutableBoolean done = new MutableBoolean(true);
+      @Override
+      public Boolean get() {
+        try {
+          done.setValue(true);
+          cache.getDfsClientShmManager().visit(new Visitor() {
+            @Override
+            public void visit(HashMap<DatanodeInfo,
+                  PerDatanodeVisitorInfo> info) throws IOException {
+              Assert.assertTrue(info.get(datanode).full.isEmpty());
+              Assert.assertFalse(info.get(datanode).disabled);
+              Assert.assertEquals(1,
+                  info.get(datanode).notFull.values().size());
+              DfsClientShm shm = info.get(datanode).notFull.values().
+                  iterator().next();
+              // Check that all slots have been invalidated.
+              for (Iterator<Slot> iter = shm.slotIterator();
+                   iter.hasNext(); ) {
+                Slot slot = iter.next();
+                if (slot.isValid()) {
+                  done.setValue(false);
+                }
+              }
+            }
+          });
+        } catch (IOException e) {
+          LOG.error("error running visitor", e);
+        }
+        return done.booleanValue();
+      }
+    }, 10, 60000);
+    cluster.shutdown();
+    sockDir.close();
+  }
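For reference, the short-circuit client settings this test relies on (the keys are the standard HDFS ones imported above; the socket path value is illustrative):

    Configuration conf = new Configuration();
    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
    conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
        "/var/lib/hadoop-hdfs/dn_socket");
    // Keep cached file descriptors alive for the whole test run.
    conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
        1000000000L);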
 }

+ 38 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -39,14 +39,18 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
-import org.apache.hadoop.hdfs.TestDFSClientRetries;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 /** Test WebHDFS */
 public class TestWebHDFS {
@@ -445,4 +449,37 @@ public class TestWebHDFS {
       }
     }
   }
+
+  /**
+   * Make sure a RetriableException is thrown when rpcServer is null in
+   * NamenodeWebHdfsMethods.
+   */
+  @Test
+  public void testRaceWhileNNStartup() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      final NameNode namenode = cluster.getNameNode();
+      final NamenodeProtocols rpcServer = namenode.getRpcServer();
+      Whitebox.setInternalState(namenode, "rpcServer", null);
+
+      final Path foo = new Path("/foo");
+      final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
+          WebHdfsFileSystem.SCHEME);
+      try {
+        webHdfs.mkdirs(foo);
+        fail("Expected RetriableException");
+      } catch (RetriableException e) {
+        GenericTestUtils.assertExceptionContains("Namenode is in startup mode",
+            e);
+      }
+      Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
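This test (and the HA variant further below) temporarily detaches the NameNode's RPC server via Mockito's Whitebox reflection helper to simulate the startup window. A condensed sketch of that pattern:

    // Simulate the window during startup where the RPC server is not yet wired up.
    final NamenodeProtocols rpcServer = namenode.getRpcServer();
    Whitebox.setInternalState(namenode, "rpcServer", null);
    try {
      // ... issue WebHDFS calls; they should surface or retry on RetriableException ...
    } finally {
      // Restore the real RPC server so the cluster can shut down cleanly.
      Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
    }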
 }

+ 69 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java

@@ -18,6 +18,15 @@
 
 
 package org.apache.hadoop.hdfs.web;
 
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+import java.net.URI;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -29,18 +38,14 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.junit.Assert;
 import org.junit.Test;
-
-import java.io.IOException;
-import java.net.URI;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.mockito.Mockito.spy;
-import static org.mockito.Mockito.verify;
+import org.mockito.internal.util.reflection.Whitebox;
 
 
 public class TestWebHDFSForHA {
   private static final String LOGICAL_NAME = "minidfs";
@@ -182,4 +187,61 @@ public class TestWebHDFSForHA {
       }
     }
   }
+
+  /**
+   * Make sure the WebHdfsFileSystem will retry based on RetriableException when
+   * rpcServer is null in NamenodeWebHdfsMethods while NameNode starts up.
+   */
+  @Test (timeout=120000)
+  public void testRetryWhileNNStartup() throws Exception {
+    final Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
+    MiniDFSCluster cluster = null;
+    final Map<String, Boolean> resultMap = new HashMap<String, Boolean>();
+
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
+          .numDataNodes(0).build();
+      HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
+      cluster.waitActive();
+      cluster.transitionToActive(0);
+
+      final NameNode namenode = cluster.getNameNode(0);
+      final NamenodeProtocols rpcServer = namenode.getRpcServer();
+      Whitebox.setInternalState(namenode, "rpcServer", null);
+
+      new Thread() {
+        @Override
+        public void run() {
+          boolean result = false;
+          FileSystem fs = null;
+          try {
+            fs = FileSystem.get(WEBHDFS_URI, conf);
+            final Path dir = new Path("/test");
+            result = fs.mkdirs(dir);
+          } catch (IOException e) {
+            result = false;
+          } finally {
+            IOUtils.cleanup(null, fs);
+          }
+          synchronized (TestWebHDFSForHA.this) {
+            resultMap.put("mkdirs", result);
+            TestWebHDFSForHA.this.notifyAll();
+          }
+        }
+      }.start();
+
+      Thread.sleep(1000);
+      Whitebox.setInternalState(namenode, "rpcServer", rpcServer);
+      synchronized (this) {
+        while (!resultMap.containsKey("mkdirs")) {
+          this.wait();
+        }
+        Assert.assertTrue(resultMap.get("mkdirs"));
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

+ 0 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java

@@ -355,12 +355,6 @@ public class TestParam {
   public void testXAttrNameParam() {
     final XAttrNameParam p = new XAttrNameParam("user.a1");
     Assert.assertEquals(p.getXAttrName(), "user.a1");
-    try {
-      new XAttrNameParam("a1");
-      Assert.fail();
-    } catch (IllegalArgumentException e) {
-      LOG.info("EXPECTED: " + e);
-    }
   }
   
   @Test

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/editsStored.xml

@@ -986,6 +986,8 @@
         <NAMESPACE>USER</NAMESPACE>
         <NAME>a2</NAME>
       </XATTR>
+      <RPC_CLIENTID>e03f4a52-3d85-4e05-8942-286185e639bd</RPC_CLIENTID>
+      <RPC_CALLID>82</RPC_CALLID>
     </DATA>
   </RECORD>
   <RECORD>

BIN
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-0.23-reserved.tgz


BIN
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-1-reserved.tgz


+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -325,6 +325,9 @@ Release 2.5.0 - UNRELEASED
     MAPREDUCE-5952. LocalContainerLauncher#renameMapOutputForReduce incorrectly 
     assumes a single dir for mapOutIndex. (Gera Shegalov via kasha)
 
 
+    MAPREDUCE-6002. Made MR task avoid reporting error to AM when the task process
+    is shutting down. (Wangda Tan via zjshen)
+
 Release 2.4.1 - 2014-06-23 

   INCOMPATIBLE CHANGES

+ 12 - 6
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java

@@ -31,6 +31,7 @@ import java.util.concurrent.Future;
 import java.util.concurrent.LinkedBlockingQueue;
 
 import com.google.common.annotations.VisibleForTesting;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSError;
@@ -57,6 +58,7 @@ import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
 import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
@@ -406,7 +408,9 @@ public class LocalContainerLauncher extends AbstractService implements
       } catch (FSError e) {
         LOG.fatal("FSError from child", e);
         // umbilical:  MRAppMaster creates (taskAttemptListener), passes to us
-        umbilical.fsError(classicAttemptID, e.getMessage());
+        if (!ShutdownHookManager.get().isShutdownInProgress()) {
+          umbilical.fsError(classicAttemptID, e.getMessage());
+        }
         throw new RuntimeException();

       } catch (Exception exception) {
@@ -429,11 +433,13 @@ public class LocalContainerLauncher extends AbstractService implements
       } catch (Throwable throwable) {
         LOG.fatal("Error running local (uberized) 'child' : "
             + StringUtils.stringifyException(throwable));
-        Throwable tCause = throwable.getCause();
-        String cause = (tCause == null)
-            ? throwable.getMessage()
-                : StringUtils.stringifyException(tCause);
-            umbilical.fatalError(classicAttemptID, cause);
+        if (!ShutdownHookManager.get().isShutdownInProgress()) {
+          Throwable tCause = throwable.getCause();
+          String cause =
+              (tCause == null) ? throwable.getMessage() : StringUtils
+                  .stringifyException(tCause);
+          umbilical.fatalError(classicAttemptID, cause);
+        }
         throw new RuntimeException();
       }
     }

+ 15 - 7
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java

@@ -56,6 +56,7 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
@@ -176,7 +177,9 @@ class YarnChild {
       });
     } catch (FSError e) {
       LOG.fatal("FSError from child", e);
-      umbilical.fsError(taskid, e.getMessage());
+      if (!ShutdownHookManager.get().isShutdownInProgress()) {
+        umbilical.fsError(taskid, e.getMessage());
+      }
     } catch (Exception exception) {
       LOG.warn("Exception running child : "
           + StringUtils.stringifyException(exception));
@@ -201,17 +204,22 @@ class YarnChild {
       }
       // Report back any failures, for diagnostic purposes
       if (taskid != null) {
-        umbilical.fatalError(taskid, StringUtils.stringifyException(exception));
+        if (!ShutdownHookManager.get().isShutdownInProgress()) {
+          umbilical.fatalError(taskid,
+              StringUtils.stringifyException(exception));
+        }
       }
     } catch (Throwable throwable) {
       LOG.fatal("Error running child : "
     	        + StringUtils.stringifyException(throwable));
       if (taskid != null) {
-        Throwable tCause = throwable.getCause();
-        String cause = tCause == null
-                                 ? throwable.getMessage()
-                                 : StringUtils.stringifyException(tCause);
-        umbilical.fatalError(taskid, cause);
+        if (!ShutdownHookManager.get().isShutdownInProgress()) {
+          Throwable tCause = throwable.getCause();
+          String cause =
+              tCause == null ? throwable.getMessage() : StringUtils
+                  .stringifyException(tCause);
+          umbilical.fatalError(taskid, cause);
+        }
       }
     } finally {
       RPC.stopProxy(umbilical);

+ 6 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java

@@ -66,6 +66,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.Progress;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringInterner;
 import org.apache.hadoop.util.StringUtils;
 
@@ -322,6 +323,11 @@ abstract public class Task implements Writable, Configurable {
   protected void reportFatalError(TaskAttemptID id, Throwable throwable, 
                                   String logMsg) {
     LOG.fatal(logMsg);
+    
+    if (ShutdownHookManager.get().isShutdownInProgress()) {
+      return;
+    }
+    
     Throwable tCause = throwable.getCause();
     String cause = tCause == null 
                    ? StringUtils.stringifyException(throwable)

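The three hunks above (LocalContainerLauncher, YarnChild, Task) apply the same MAPREDUCE-6002 guard: skip the umbilical error report when the task JVM is already running its shutdown hooks, instead of racing a dying RPC client. A minimal sketch of that guard, assuming hadoop-common on the classpath for the ShutdownHookManager and StringUtils calls shown in the hunks; ErrorReporter is an illustrative stand-in for the umbilical protocol, not a Hadoop interface:

import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;

public class ShutdownGuardSketch {
  /** Illustrative stand-in for the AM-facing umbilical. */
  public interface ErrorReporter {
    void fatalError(String taskId, String diagnostics);
  }

  public static void reportFatal(ErrorReporter umbilical, String taskId,
                                 Throwable throwable) {
    if (ShutdownHookManager.get().isShutdownInProgress()) {
      return;  // JVM is going down anyway; reporting would only add noise
    }
    Throwable cause = throwable.getCause();
    String diagnostics = (cause == null)
        ? StringUtils.stringifyException(throwable)
        : StringUtils.stringifyException(cause);
    umbilical.fatalError(taskId, diagnostics);
  }
}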
+ 4 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/RumenToSLSConverter.java

@@ -21,6 +21,8 @@ import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.CommandLineParser;
 import org.apache.commons.cli.GnuParser;
 import org.apache.commons.cli.Options;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.codehaus.jackson.JsonFactory;
 import org.codehaus.jackson.map.ObjectMapper;
 import org.codehaus.jackson.map.ObjectWriter;
@@ -42,6 +44,8 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
+@Private
+@Unstable
 public class RumenToSLSConverter {
   private static final String EOL = System.getProperty("line.separator");
 

+ 4 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java

@@ -32,6 +32,8 @@ import java.util.Iterator;
 import java.util.Random;
 import java.util.Arrays;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.tools.rumen.JobTraceReader;
 import org.apache.hadoop.tools.rumen.LoggedJob;
@@ -66,6 +68,8 @@ import org.apache.log4j.Logger;
 import org.codehaus.jackson.JsonFactory;
 import org.codehaus.jackson.map.ObjectMapper;
 
+@Private
+@Unstable
 public class SLSRunner {
   // RM, Runner
   private ResourceManager rm;

+ 37 - 36
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/AMSimulator.java

@@ -29,6 +29,8 @@ import java.util.Map;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -61,6 +63,8 @@ import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.log4j.Logger;
 
@@ -70,6 +74,8 @@ import org.apache.hadoop.yarn.sls.SLSRunner;
 import org.apache.hadoop.yarn.sls.scheduler.TaskRunner;
 import org.apache.hadoop.yarn.sls.utils.SLSUtils;
 
+@Private
+@Unstable
 public abstract class AMSimulator extends TaskRunner.Task {
   // resource manager
   protected ResourceManager rm;
@@ -129,8 +135,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
    * register with RM
    */
   @Override
-  public void firstStep()
-          throws YarnException, IOException, InterruptedException {
+  public void firstStep() throws Exception {
     simulateStartTimeMS = System.currentTimeMillis() - 
                           SLSRunner.getRunner().getStartTimeMS();
 
@@ -145,8 +150,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
   }
 
   @Override
-  public void middleStep()
-          throws InterruptedException, YarnException, IOException {
+  public void middleStep() throws Exception {
     // process responses in the queue
     processResponseQueue();
     
@@ -158,7 +162,7 @@ public abstract class AMSimulator extends TaskRunner.Task {
   }
 
   @Override
-  public void lastStep() {
+  public void lastStep() throws Exception {
     LOG.info(MessageFormat.format("Application {0} is shutting down.", appId));
     // unregister tracking
     if (isTracked) {
@@ -169,26 +173,19 @@ public abstract class AMSimulator extends TaskRunner.Task {
                   .newRecordInstance(FinishApplicationMasterRequest.class);
     finishAMRequest.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED);
 
-    try {
-      UserGroupInformation ugi =
-              UserGroupInformation.createRemoteUser(appAttemptId.toString());
-      Token<AMRMTokenIdentifier> token =
-              rm.getRMContext().getRMApps().get(appAttemptId.getApplicationId())
-                .getRMAppAttempt(appAttemptId).getAMRMToken();
-      ugi.addTokenIdentifier(token.decodeIdentifier());
-      ugi.doAs(new PrivilegedExceptionAction<Object>() {
-        @Override
-        public Object run() throws Exception {
-          rm.getApplicationMasterService()
-                  .finishApplicationMaster(finishAMRequest);
-          return null;
-        }
-      });
-    } catch (IOException e) {
-      e.printStackTrace();
-    } catch (InterruptedException e) {
-      e.printStackTrace();
-    }
+    UserGroupInformation ugi =
+        UserGroupInformation.createRemoteUser(appAttemptId.toString());
+    Token<AMRMTokenIdentifier> token = rm.getRMContext().getRMApps().get(appId)
+        .getRMAppAttempt(appAttemptId).getAMRMToken();
+    ugi.addTokenIdentifier(token.decodeIdentifier());
+    ugi.doAs(new PrivilegedExceptionAction<Object>() {
+      @Override
+      public Object run() throws Exception {
+        rm.getApplicationMasterService()
+            .finishApplicationMaster(finishAMRequest);
+        return null;
+      }
+    });
 
     simulateFinishTimeMS = System.currentTimeMillis() -
         SLSRunner.getRunner().getStartTimeMS();
@@ -226,11 +223,9 @@ public abstract class AMSimulator extends TaskRunner.Task {
     return createAllocateRequest(ask, new ArrayList<ContainerId>());
   }
 
-  protected abstract void processResponseQueue()
-          throws InterruptedException, YarnException, IOException;
+  protected abstract void processResponseQueue() throws Exception;
   
-  protected abstract void sendContainerRequest()
-          throws YarnException, IOException, InterruptedException;
+  protected abstract void sendContainerRequest() throws Exception;
   
   protected abstract void checkStop();
   
@@ -276,11 +271,18 @@ public abstract class AMSimulator extends TaskRunner.Task {
     // waiting until application ACCEPTED
     RMApp app = rm.getRMContext().getRMApps().get(appId);
     while(app.getState() != RMAppState.ACCEPTED) {
-      Thread.sleep(50);
+      Thread.sleep(10);
     }
 
-    appAttemptId = rm.getRMContext().getRMApps().get(appId)
-            .getCurrentAppAttempt().getAppAttemptId();
+    // Waiting until application attempt reach LAUNCHED
+    // "Unmanaged AM must register after AM attempt reaches LAUNCHED state"
+    this.appAttemptId = rm.getRMContext().getRMApps().get(appId)
+        .getCurrentAppAttempt().getAppAttemptId();
+    RMAppAttempt rmAppAttempt = rm.getRMContext().getRMApps().get(appId)
+        .getCurrentAppAttempt();
+    while (rmAppAttempt.getAppAttemptState() != RMAppAttemptState.LAUNCHED) {
+      Thread.sleep(10);
+    }
   }
 
   private void registerAM()
@@ -293,10 +295,9 @@ public abstract class AMSimulator extends TaskRunner.Task {
     amRegisterRequest.setTrackingUrl("localhost:1000");
 
     UserGroupInformation ugi =
-            UserGroupInformation.createRemoteUser(appAttemptId.toString());
-    Token<AMRMTokenIdentifier> token =
-            rm.getRMContext().getRMApps().get(appAttemptId.getApplicationId())
-                    .getRMAppAttempt(appAttemptId).getAMRMToken();
+        UserGroupInformation.createRemoteUser(appAttemptId.toString());
+    Token<AMRMTokenIdentifier> token = rm.getRMContext().getRMApps().get(appId)
+        .getRMAppAttempt(appAttemptId).getAMRMToken();
     ugi.addTokenIdentifier(token.decodeIdentifier());
 
     ugi.doAs(

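The AMSimulator change above tightens registration ordering: it polls with Thread.sleep(10) until the application reaches RMAppState.ACCEPTED and then until the attempt reaches RMAppAttemptState.LAUNCHED before registering the unmanaged AM. A generic sketch of that poll-until-state idiom follows; the deadline check is an extra safeguard added for this sketch only (the patch itself polls without a timeout):

import java.util.function.Supplier;

public final class PollUntil {
  private PollUntil() {}

  // Sleep in short increments until the condition holds or the deadline passes.
  public static void pollUntil(Supplier<Boolean> condition, long intervalMs,
                               long timeoutMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!condition.get()) {
      if (System.currentTimeMillis() > deadline) {
        throw new IllegalStateException("condition not met within " + timeoutMs + " ms");
      }
      Thread.sleep(intervalMs);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    long start = System.currentTimeMillis();
    // Example: wait until ~50 ms have elapsed, checking every 10 ms.
    pollUntil(() -> System.currentTimeMillis() - start >= 50, 10, 1000);
    System.out.println("condition reached");
  }
}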
+ 6 - 3
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/appmaster/MRAMSimulator.java

@@ -27,6 +27,8 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
@@ -45,6 +47,8 @@ import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
 import org.apache.hadoop.yarn.sls.SLSRunner;
 import org.apache.log4j.Logger;
 
+@Private
+@Unstable
 public class MRAMSimulator extends AMSimulator {
   /*
   Vocabulary Used: 
@@ -141,8 +145,7 @@ public class MRAMSimulator extends AMSimulator {
   }
 
   @Override
-  public void firstStep()
-          throws YarnException, IOException, InterruptedException {
+  public void firstStep() throws Exception {
     super.firstStep();
     
     requestAMContainer();
@@ -386,7 +389,7 @@ public class MRAMSimulator extends AMSimulator {
   }
 
   @Override
-  public void lastStep() {
+  public void lastStep() throws Exception {
     super.lastStep();
 
     // clear data structures

+ 5 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/conf/SLSConfiguration.java

@@ -18,6 +18,11 @@
 
 package org.apache.hadoop.yarn.sls.conf;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+@Private
+@Unstable
 public class SLSConfiguration {
   // sls
   public static final String PREFIX = "yarn.sls.";

+ 43 - 29
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NMSimulator.java

@@ -27,6 +27,9 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.DelayQueue;
 
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
@@ -54,6 +57,8 @@ import org.apache.hadoop.yarn.sls.scheduler.ContainerSimulator;
 import org.apache.hadoop.yarn.sls.scheduler.TaskRunner;
 import org.apache.hadoop.yarn.sls.utils.SLSUtils;
 
+@Private
+@Unstable
 public class NMSimulator extends TaskRunner.Task {
   // node resource
   private RMNode node;
@@ -103,12 +108,12 @@ public class NMSimulator extends TaskRunner.Task {
   }
 
   @Override
-  public void firstStep() throws YarnException, IOException {
+  public void firstStep() {
     // do nothing
   }
 
   @Override
-  public void middleStep() {
+  public void middleStep() throws Exception {
     // we check the lifetime for each running containers
     ContainerSimulator cs = null;
     synchronized(completedContainerList) {
@@ -132,37 +137,31 @@ public class NMSimulator extends TaskRunner.Task {
     ns.setResponseId(RESPONSE_ID ++);
     ns.setNodeHealthStatus(NodeHealthStatus.newInstance(true, "", 0));
     beatRequest.setNodeStatus(ns);
-    try {
-      NodeHeartbeatResponse beatResponse =
-              rm.getResourceTrackerService().nodeHeartbeat(beatRequest);
-      if (! beatResponse.getContainersToCleanup().isEmpty()) {
-        // remove from queue
-        synchronized(releasedContainerList) {
-          for (ContainerId containerId : beatResponse.getContainersToCleanup()){
-            if (amContainerList.contains(containerId)) {
-              // AM container (not killed?, only release)
-              synchronized(amContainerList) {
-                amContainerList.remove(containerId);
-              }
-              LOG.debug(MessageFormat.format("NodeManager {0} releases " +
-                      "an AM ({1}).", node.getNodeID(), containerId));
-            } else {
-              cs = runningContainers.remove(containerId);
-              containerQueue.remove(cs);
-              releasedContainerList.add(containerId);
-              LOG.debug(MessageFormat.format("NodeManager {0} releases a " +
-                      "container ({1}).", node.getNodeID(), containerId));
+    NodeHeartbeatResponse beatResponse =
+        rm.getResourceTrackerService().nodeHeartbeat(beatRequest);
+    if (! beatResponse.getContainersToCleanup().isEmpty()) {
+      // remove from queue
+      synchronized(releasedContainerList) {
+        for (ContainerId containerId : beatResponse.getContainersToCleanup()){
+          if (amContainerList.contains(containerId)) {
+            // AM container (not killed?, only release)
+            synchronized(amContainerList) {
+              amContainerList.remove(containerId);
             }
+            LOG.debug(MessageFormat.format("NodeManager {0} releases " +
+                "an AM ({1}).", node.getNodeID(), containerId));
+          } else {
+            cs = runningContainers.remove(containerId);
+            containerQueue.remove(cs);
+            releasedContainerList.add(containerId);
+            LOG.debug(MessageFormat.format("NodeManager {0} releases a " +
+                "container ({1}).", node.getNodeID(), containerId));
           }
         }
       }
-      if (beatResponse.getNodeAction() == NodeAction.SHUTDOWN) {
-        lastStep();
-      }
-    } catch (YarnException e) {
-      e.printStackTrace();
-    } catch (IOException e) {
-      e.printStackTrace();
+    }
+    if (beatResponse.getNodeAction() == NodeAction.SHUTDOWN) {
+      lastStep();
     }
   }
 
@@ -258,4 +257,19 @@ public class NMSimulator extends TaskRunner.Task {
       completedContainerList.add(containerId);
     }
   }
+
+  @VisibleForTesting
+  Map<ContainerId, ContainerSimulator> getRunningContainers() {
+    return runningContainers;
+  }
+
+  @VisibleForTesting
+  List<ContainerId> getAMContainers() {
+    return amContainerList;
+  }
+
+  @VisibleForTesting
+  List<ContainerId> getCompletedContainers() {
+    return completedContainerList;
+  }
 }

+ 7 - 1
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java

@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.sls.nodemanager;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
@@ -36,6 +38,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode
         .UpdatedContainerInfo;
 
+@Private
+@Unstable
 public class NodeInfo {
   private static int NODE_ID = 0;
 
@@ -43,6 +47,8 @@ public class NodeInfo {
     return NodeId.newInstance(host, port);
   }
 
+  @Private
+  @Unstable
   private static class FakeRMNodeImpl implements RMNode {
     private NodeId nodeId;
     private String hostName;
@@ -164,7 +170,7 @@ public class NodeInfo {
       perNode = resourceOption;
     }
   }
-  
+
   public static RMNode newNodeInfo(String rackName, String hostName,
                               final ResourceOption resourceOption, int port) {
     final NodeId nodeId = newNodeID(hostName, port);

+ 5 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/CapacitySchedulerMetrics.java

@@ -18,6 +18,11 @@
 
 package org.apache.hadoop.yarn.sls.scheduler;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+@Private
+@Unstable
 public class CapacitySchedulerMetrics extends SchedulerMetrics {
 
   public CapacitySchedulerMetrics() {

+ 4 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ContainerSimulator.java

@@ -21,9 +21,13 @@ package org.apache.hadoop.yarn.sls.scheduler;
 import java.util.concurrent.Delayed;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
 
+@Private
+@Unstable
 public class ContainerSimulator implements Delayed {
   // id
   private ContainerId id;

+ 4 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.sls.scheduler;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
         .AppSchedulable;
@@ -28,6 +30,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair
 import com.codahale.metrics.Gauge;
 import org.apache.hadoop.yarn.sls.SLSRunner;
 
+@Private
+@Unstable
 public class FairSchedulerMetrics extends SchedulerMetrics {
 
   private int totalMemoryMB = Integer.MAX_VALUE;

+ 4 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FifoSchedulerMetrics.java

@@ -18,12 +18,16 @@
 
 package org.apache.hadoop.yarn.sls.scheduler;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.QueueInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo
         .FifoScheduler;
 
 import com.codahale.metrics.Gauge;
 
+@Private
+@Unstable
 public class FifoSchedulerMetrics extends SchedulerMetrics {
   
   public FifoSchedulerMetrics() {

+ 4 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/NodeUpdateSchedulerEventWrapper.java

@@ -18,9 +18,13 @@
 
 package org.apache.hadoop.yarn.sls.scheduler;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event
         .NodeUpdateSchedulerEvent;
 
+@Private
+@Unstable
 public class NodeUpdateSchedulerEventWrapper extends NodeUpdateSchedulerEvent {
   
   public NodeUpdateSchedulerEventWrapper(NodeUpdateSchedulerEvent event) {

+ 4 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.sls.scheduler;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -33,6 +35,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode
 import java.util.Collections;
 import java.util.List;
 
+@Private
+@Unstable
 public class RMNodeWrapper implements RMNode {
   private RMNode node;
   private List<UpdatedContainerInfo> updates;

+ 30 - 13
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java

@@ -36,6 +36,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configurable;
@@ -66,6 +67,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Allocation;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
@@ -92,13 +94,14 @@ import com.codahale.metrics.MetricRegistry;
 import com.codahale.metrics.SlidingWindowReservoir;
 import com.codahale.metrics.Timer;
 
+@Private
+@Unstable
 final public class ResourceSchedulerWrapper
     extends AbstractYarnScheduler<SchedulerApplicationAttempt, SchedulerNode>
     implements SchedulerWrapper, ResourceScheduler, Configurable {
   private static final String EOL = System.getProperty("line.separator");
   private static final int SAMPLING_SIZE = 60;
   private ScheduledExecutorService pool;
-  private RMContext rmContext;
   // counters for scheduler allocate/handle operations
   private Counter schedulerAllocateCounter;
   private Counter schedulerHandleCounter;
@@ -573,7 +576,7 @@ final public class ResourceSchedulerWrapper
       new Gauge<Integer>() {
         @Override
         public Integer getValue() {
-          if(scheduler == null || scheduler.getRootQueueMetrics() == null) {
+          if (scheduler == null || scheduler.getRootQueueMetrics() == null) {
             return 0;
           } else {
             return scheduler.getRootQueueMetrics().getAppsRunning();
@@ -720,17 +723,18 @@ final public class ResourceSchedulerWrapper
   public void addAMRuntime(ApplicationId appId,
                            long traceStartTimeMS, long traceEndTimeMS,
                            long simulateStartTimeMS, long simulateEndTimeMS) {
-
-    try {
-      // write job runtime information
-      StringBuilder sb = new StringBuilder();
-      sb.append(appId).append(",").append(traceStartTimeMS).append(",")
-              .append(traceEndTimeMS).append(",").append(simulateStartTimeMS)
-              .append(",").append(simulateEndTimeMS);
-      jobRuntimeLogBW.write(sb.toString() + EOL);
-      jobRuntimeLogBW.flush();
-    } catch (IOException e) {
-      e.printStackTrace();
+    if (metricsON) {
+      try {
+        // write job runtime information
+        StringBuilder sb = new StringBuilder();
+        sb.append(appId).append(",").append(traceStartTimeMS).append(",")
+            .append(traceEndTimeMS).append(",").append(simulateStartTimeMS)
+            .append(",").append(simulateEndTimeMS);
+        jobRuntimeLogBW.write(sb.toString() + EOL);
+        jobRuntimeLogBW.flush();
+      } catch (IOException e) {
+        e.printStackTrace();
+      }
     }
   }
 
@@ -916,4 +920,17 @@ final public class ResourceSchedulerWrapper
   public Resource getClusterResource() {
     return null;
   }
+
+  @Override
+  public synchronized List<Container> getTransferredContainers(
+      ApplicationAttemptId currentAttempt) {
+    return new ArrayList<Container>();
+  }
+
+  @Override
+  public Map<ApplicationId, SchedulerApplication<SchedulerApplicationAttempt>>
+      getSchedulerApplications() {
+    return new HashMap<ApplicationId,
+        SchedulerApplication<SchedulerApplicationAttempt>>();
+  }
 }

+ 16 - 10
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SLSCapacityScheduler.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.yarn.sls.scheduler;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.yarn.sls.SLSRunner;
 import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
@@ -100,6 +102,8 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
+@Private
+@Unstable
 public class SLSCapacityScheduler extends CapacityScheduler implements
         SchedulerWrapper,Configurable {
   private static final String EOL = System.getProperty("line.separator");
@@ -725,16 +729,18 @@ public class SLSCapacityScheduler extends CapacityScheduler implements
                            long traceStartTimeMS, long traceEndTimeMS,
                            long simulateStartTimeMS, long simulateEndTimeMS) {
 
-    try {
-      // write job runtime information
-      StringBuilder sb = new StringBuilder();
-      sb.append(appId).append(",").append(traceStartTimeMS).append(",")
-              .append(traceEndTimeMS).append(",").append(simulateStartTimeMS)
-              .append(",").append(simulateEndTimeMS);
-      jobRuntimeLogBW.write(sb.toString() + EOL);
-      jobRuntimeLogBW.flush();
-    } catch (IOException e) {
-      e.printStackTrace();
+    if (metricsON) {
+      try {
+        // write job runtime information
+        StringBuilder sb = new StringBuilder();
+        sb.append(appId).append(",").append(traceStartTimeMS).append(",")
+            .append(traceEndTimeMS).append(",").append(simulateStartTimeMS)
+            .append(",").append(simulateEndTimeMS);
+        jobRuntimeLogBW.write(sb.toString() + EOL);
+        jobRuntimeLogBW.flush();
+      } catch (IOException e) {
+        e.printStackTrace();
+      }
     }
   }
 

+ 4 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerMetrics.java

@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.sls.scheduler;
 import java.util.HashSet;
 import java.util.Set;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler
         .ResourceScheduler;
@@ -30,6 +32,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler
 import com.codahale.metrics.Gauge;
 import com.codahale.metrics.MetricRegistry;
 
+@Private
+@Unstable
 public abstract class SchedulerMetrics {
   protected ResourceScheduler scheduler;
   protected Set<String> trackedQueues;

+ 4 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/SchedulerWrapper.java

@@ -19,11 +19,15 @@ package org.apache.hadoop.yarn.sls.scheduler;
 
 import java.util.Set;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 
 import com.codahale.metrics.MetricRegistry;
 
+@Private
+@Unstable
 public interface SchedulerWrapper {
 
 	public MetricRegistry getMetrics();

+ 12 - 10
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/TaskRunner.java

@@ -25,9 +25,15 @@ import java.util.concurrent.Delayed;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
+@Private
+@Unstable
 public class TaskRunner {
+  @Private
+  @Unstable
   public abstract static class Task implements Runnable, Delayed {
     private long start;
     private long end;
@@ -93,12 +99,10 @@ public class TaskRunner {
         } else {
           lastStep();
         }
-      } catch (YarnException e) {
-        e.printStackTrace();
-      } catch (IOException e) {
-        e.printStackTrace();
-      } catch (InterruptedException e) {
+      } catch (Exception e) {
         e.printStackTrace();
+        Thread.getDefaultUncaughtExceptionHandler()
+            .uncaughtException(Thread.currentThread(), e);
       }
     }
 
@@ -118,13 +122,11 @@ public class TaskRunner {
     }
 
 
-    public abstract void firstStep()
-            throws YarnException, IOException, InterruptedException;
+    public abstract void firstStep() throws Exception;
 
-    public abstract void middleStep()
-            throws YarnException, InterruptedException, IOException;
+    public abstract void middleStep() throws Exception;
 
-    public abstract void lastStep() throws YarnException;
+    public abstract void lastStep() throws Exception;
 
     public void setEndTime(long et) {
       endTime = et;

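The TaskRunner change above collapses the per-exception catch blocks into a single catch (Exception e) that forwards the failure to the JVM-wide default uncaught exception handler, so a failing simulator task can stop the run instead of being silently printed. A small standalone sketch of that hand-off; the handler body is illustrative:

public class UncaughtHandoffSketch {
  public static void main(String[] args) {
    // Install a process-wide handler; a real runner might call System.exit(1) here.
    Thread.setDefaultUncaughtExceptionHandler((thread, error) ->
        System.err.println("task failed on " + thread.getName() + ": " + error));

    Runnable step = () -> {
      try {
        throw new Exception("simulated step failure");
      } catch (Exception e) {
        e.printStackTrace();
        // Route the caught exception to the default handler, as the patch does.
        Thread.getDefaultUncaughtExceptionHandler()
            .uncaughtException(Thread.currentThread(), e);
      }
    };
    new Thread(step, "sls-task").start();
  }
}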
+ 4 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.yarn.sls.utils;
 
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.tools.rumen.JobTraceReader;
@@ -36,6 +38,8 @@ import java.util.Map;
 import java.util.List;
 import java.util.Iterator;
 
+@Private
+@Unstable
 public class SLSUtils {
 
   public static String[] getRackHostName(String hostname) {

+ 4 - 0
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java

@@ -30,6 +30,8 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event
         .SchedulerEventType;
 import org.mortbay.jetty.Handler;
@@ -49,6 +51,8 @@ import com.codahale.metrics.Histogram;
 import com.codahale.metrics.MetricRegistry;
 import org.mortbay.jetty.handler.ResourceHandler;
 
+@Private
+@Unstable
 public class SLSWebApp extends HttpServlet {
   private static final long serialVersionUID = 1905162041950251407L;
   private transient Server server;

Some files were not shown because too many files changed in this diff