
Merge remote-tracking branch 'apache/trunk' into HDFS-7285

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

Change-Id: I8511c4d64b0959e79129febc179845a3892fedcc
Zhe Zhang, 9 years ago
parent commit 1080c37300
100 changed files with 4179 additions and 1788 deletions
  1. 7 0
      hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml
  2. 2 1
      hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java
  3. 139 66
      hadoop-common-project/hadoop-common/CHANGES.txt
  4. 16 1
      hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
  5. 10 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  6. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  7. 7 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java
  8. 1 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
  9. 23 10
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
  10. 13 6
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
  11. 53 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
  12. 4 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  13. 12 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
  14. 7 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  15. 3 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java
  16. 8 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java
  17. 12 8
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java
  18. 3 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java
  19. 218 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/StatsDSink.java
  20. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java
  21. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java
  22. 10 7
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java
  23. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java
  24. 11 0
      hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
  25. 1 1
      hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md
  26. 3 0
      hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md
  27. 1 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java
  28. 4 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java
  29. 4 2
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java
  30. 46 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java
  31. 12 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java
  32. 37 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
  33. 8 1
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java
  34. 13 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java
  35. 122 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestStatsDMetrics.java
  36. 9 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java
  37. 1 6
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java
  38. 15 7
      hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
  39. 1 0
      hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
  40. 105 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
  41. 14 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
  42. 1 1
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java
  43. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java
  44. 2 2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java
  45. 4 4
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java
  46. 7 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
  47. 42 5
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
  48. 8 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
  49. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java
  50. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java
  51. 0 31
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java
  52. 2 2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java
  53. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java
  54. 10 10
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java
  55. 1 1
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java
  56. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java
  57. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java
  58. 17 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
  59. 2177 9
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
  60. 0 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java
  61. 11 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java
  62. 2 1
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
  63. 2 2
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  64. 16 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
  65. 1 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto
  66. 17 0
      hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem
  67. 2 2
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java
  68. 2 2
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java
  69. 4 3
      hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java
  70. 276 133
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  71. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java
  72. 4 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
  73. 14 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/SWebHdfs.java
  74. 13 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/WebHdfs.java
  75. 4 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  76. 41 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  77. 6 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  78. 1 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
  79. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  80. 8 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
  81. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java
  82. 8 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/PeerServer.java
  83. 7 63
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java
  84. 16 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java
  85. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java
  86. 20 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java
  87. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java
  88. 68 67
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  89. 64 64
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  90. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
  91. 6 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java
  92. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java
  93. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java
  94. 1 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
  95. 35 1039
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  96. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java
  97. 29 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
  98. 6 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java
  99. 71 99
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  100. 168 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java

+ 7 - 0
hadoop-assemblies/src/main/resources/assemblies/hadoop-tools.xml

@@ -51,6 +51,13 @@
        <include>*-sources.jar</include>
      </includes>
    </fileSet>
+    <fileSet>
+      <directory>../hadoop-archive-logs/target</directory>
+      <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>
+      <includes>
+        <include>*-sources.jar</include>
+      </includes>
+    </fileSet>
    <fileSet>
      <directory>../hadoop-datajoin/target</directory>
      <outputDirectory>/share/hadoop/${hadoop.component}/sources</outputDirectory>

+ 2 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationToken.java

@@ -62,7 +62,8 @@ public class AuthenticationToken extends AuthToken {
  /**
   * Sets the max inactive time of the token.
   *
-   * @param max inactive time of the token in milliseconds since the epoch.
+   * @param maxInactives inactive time of the token in milliseconds
+   *                     since the epoch.
   */
  public void setMaxInactives(long maxInactives) {
    if (this != AuthenticationToken.ANONYMOUS) {

+ 139 - 66
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -29,6 +29,9 @@ Trunk (Unreleased)
    HADOOP-11698. Remove DistCpV1 and Logalyzer.
    (Brahma Reddy Battula via aajisaka)

+    HADOOP-11356. Removed deprecated o.a.h.fs.permission.AccessControlException.
+    (Li Lu via wheat9)
+
  NEW FEATURES

    HADOOP-6590. Add a username check for hadoop sub-commands (John Smith via
@@ -342,9 +345,6 @@ Trunk (Unreleased)
    HADOOP-7256. Resource leak during failure scenario of closing
    of resources. (Ramkrishna S. Vasudevan via harsh)

-    HADOOP-8151. Error handling in snappy decompressor throws invalid
-    exceptions. (Matt Foley via harsh)
-
    HADOOP-8813. Add InterfaceAudience and InterfaceStability annotations
    to RPC Server and Client classes. (Brandon Li via suresh)

@@ -506,6 +506,9 @@

    HADOOP-12244. recover broken rebase during precommit (aw)

+    HADOOP-11942. Add links to SLGUserGuide to site index.
+    (Masatake Iwasaki via xyao)
+
  OPTIMIZATIONS

    HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -518,6 +521,9 @@ Release 2.8.0 - UNRELEASED

    HADOOP-11746. rewrite test-patch.sh (aw)

+    HADOOP-12416. Trash messages should be handled by Logger instead of being
+    delivered on System.out. (Mingliang Liu via aajisaka)
+
  NEW FEATURES

    HADOOP-11226. Add a configuration to set ipc.Client's traffic class with
@@ -540,6 +546,8 @@ Release 2.8.0 - UNRELEASED
    HADOOP-5732. Add SFTP FileSystem. (Ramtin Boustani and Inigo Goiri via
    cdouglas)

+    HADOOP-12360. Create StatsD metrics2 sink. (Dave Marion via stevel)
+
  IMPROVEMENTS

    HADOOP-12271. Hadoop Jar Error Should Be More Explanatory
@@ -608,8 +616,7 @@ Release 2.8.0 - UNRELEASED
    HADOOP-11917. test-patch.sh should work with ${BASEDIR}/patchprocess
    setups (aw)

-    HADOOP-11942. Add links to SLGUserGuide to site index.
-    (Masatake Iwasaki via xyao)
+    HADOOP-11925. backport trunk's smart-apply-patch.sh to branch-2 (aw)

    HADOOP-11906. test-patch.sh should use 'file' command for patch
    determinism (Sean Busbey via aw)
@@ -742,12 +749,12 @@ Release 2.8.0 - UNRELEASED
    command-line arguments passed by the user (Masatake Iwasaki via Colin P.
    McCabe)

-    HADOOP-12280. Skip unit tests based on maven profile rather than
-    NativeCodeLoader.isNativeCodeLoaded (Masatake Iwasaki via Colin P. McCabe)
-
    HADOOP-12318. Expose underlying LDAP exceptions in SaslPlainServer. (Mike
    Yoder via atm)

+    HADOOP-9891. CLIMiniCluster instructions fail with MiniYarnCluster
+    ClassNotFoundException (Darrell Taylor via aw)
+
    HADOOP-12295. Improve NetworkTopology#InnerNode#remove logic. (yliu)

    HADOOP-12050. Enable MaxInactiveInterval for hadoop http auth token
@@ -762,14 +769,33 @@ Release 2.8.0 - UNRELEASED
    HADOOP-12367. Move TestFileUtil's test resources to resources folder.
    (wang via yliu)

+    HADOOP-12369. Point hadoop-project/pom.xml java.security.krb5.conf
+    within target folder. (wang)
+
+    HADOOP-12358. Add -safely flag to rm to prompt when deleting many files.
+    (xyao via wang)
+
+    HADOOP-12384. Add "-direct" flag option for fs copy so that user can choose
+    not to create "._COPYING_" file (J.Andreina via vinayakumarb)
+
+    HADOOP-12324. Better exception reporting in SaslPlainServer.
+    (Mike Yoder via stevel)
+
+    HADOOP-12413. AccessControlList should avoid calling getGroupNames in
+    isUserInList with empty groups. (Zhihai Xu via cnauroth)
+
+    HADOOP-12404. Disable caching for JarURLConnection to avoid sharing
+    JarFile with other users when loading resource from URL in Configuration
+    class. (zxu)
+
+    HADOOP-12428. Fix inconsistency between log-level guards and statements.
+    (Jagadesh Kiran N and Jackie Chang via ozawa)
+
  OPTIMIZATIONS

    HADOOP-11785. Reduce the number of listStatus operation in distcp
    buildListing (Zoran Dimitrijevic via Colin P. McCabe)

-    HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up
-    split calculation (gera)
-
    HADOOP-11970. Replace uses of ThreadLocal<Random> with JDK7
    ThreadLocalRandom.  (Sean Busbey via Colin P. McCabe)

@@ -802,11 +828,14 @@ Release 2.8.0 - UNRELEASED

  BUG FIXES

+    HADOOP-12374. Updated expunge command description.
+    (WeiWei Yang via eyang)
+
    HADOOP-12352. Delay in checkpointing Trash can leave trash for 2 intervals
    before deleting (Casey Brotherton via harsh)

-    HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
-    is an I/O error during requestShortCircuitShm (cmccabe)
+    HADOOP-11568. Description on usage of classpath in hadoop command is
+    incomplete. ( Archana T via vinayakumarb )

    HADOOP-10027. *Compressor_deflateBytesDirect passes instance instead of
    jclass to GetStaticObjectField. (Hui Zheng via cnauroth)
@@ -832,8 +861,8 @@ Release 2.8.0 - UNRELEASED
    HADOOP-11859. PseudoAuthenticationHandler fails with httpcomponents v4.4.
    (Eugene Koifman via jitendra)

-    HADOOP-11848. Incorrect arguments to sizeof in DomainSocket.c (Malcolm
-    Kavalsky via Colin P. McCabe)
+    HADOOP-11848. Incorrect arguments to sizeof in DomainSocket.c
+    (Malcolm Kavalsky via Colin P. McCabe)

    HADOOP-11861. test-patch.sh rewrite addendum patch.
    (Allen Wittenauer via cnauroth)
@@ -856,9 +885,6 @@ Release 2.8.0 - UNRELEASED
    HADOOP-11866. increase readability and reliability of checkstyle,
    shellcheck, and whitespace reports (aw)

-    HADOOP-11491. HarFs incorrectly declared as requiring an authority.
-    (Brahma Reddy Battula via gera)
-
    HADOOP-11889. Make checkstyle runnable from root project
    (Gera Shegalov via jeagles)

@@ -1020,6 +1046,12 @@ Release 2.8.0 - UNRELEASED
    HADOOP-12164. Fix TestMove and TestFsShellReturnCode failed to get command
    name using reflection. (Lei (Eddy) Xu)

+    HADOOP-12173. NetworkTopology::add calls toString always.
+    (Inigo Goiri via cdouglas)
+
+    HADOOP-12185. NetworkTopology is not efficient adding/getting/removing
+    nodes. (Inigo Goiri via cdouglas)
+
    HADOOP-12117. Potential NPE from Configuration#loadProperty with
    allowNullValueProperties set. (zhihai xu via vinayakumarb)

@@ -1041,6 +1073,19 @@ Release 2.8.0 - UNRELEASED
    HADOOP-12088. KMSClientProvider uses equalsIgnoreCase("application/json").
    (Brahma Reddy Battula via stevel)

+    HADOOP-11797. releasedocmaker.py needs to put ASF headers on output (aw)
+
+    HADOOP-12348. MetricsSystemImpl creates MetricsSourceAdapter with wrong
+    time unit parameter. (zxu via rkanter)
+
+    HADOOP-12087. [JDK8] Fix javadoc errors caused by incorrect or illegal
+    tags. (Akira AJISAKA via stevel).
+
+    HADOOP-12386. RetryPolicies.RETRY_FOREVER should be able to specify a 
+    retry interval. (Sunil G via wangda)
+
+  OPTIMIZATIONS
+
    HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
    over getMessage() in logging/span events. (Varun Saxena via stevel)

@@ -1088,6 +1133,17 @@ Release 2.8.0 - UNRELEASED
    HADOOP-12362. Set hadoop.tmp.dir and hadoop.log.dir in pom.
    (Charlie Helin via wang)

+    HADOOP-10318. Incorrect reference to nodeFile in RumenToSLSConverter
+    error message. (Wei Yan via ozawa)
+
+    HADOOP-12388. Fix components' version information in the web page
+    'About the Cluster'. (Jun Gong via zxu)
+
+    HADOOP-12407. Test failing: hadoop.ipc.TestSaslRPC. (stevel)
+
+    HADOOP-12417. TestWebDelegationToken failing with port in use.
+    (Mingliang Liu via wheat9)
+
Release 2.7.2 - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -1114,15 +1170,18 @@ Release 2.7.2 - UNRELEASED
    HADOOP-12304. Applications using FileContext fail with the default file
    system configured to be wasb/s3/etc. (cnauroth)

-    HADOOP-11932. MetricsSinkAdapter may hang  when being stopped.
-    (Brahma Reddy Battula via jianhe)
-
    HADOOP-12061. Incorrect command in single cluster setup document.
    (Kengo Seki via aajisaka)

    HADOOP-12359. hadoop fs -getmerge doc is wrong.
    (Jagadesh Kiran N via aajisaka)

+    HADOOP-10365. BufferedOutputStream in FileUtil#unpackEntries() should be
+    closed in finally block. (Kiran Kumar M R and Sanghyun Yun via ozawa)
+
+    HADOOP-12213. Interrupted exception can occur when Client#stop is called.
+    (Kuhu Shukla via ozawa)
+
Release 2.7.1 - 2015-07-06

  INCOMPATIBLE CHANGES
@@ -1141,11 +1200,8 @@ Release 2.7.1 - 2015-07-06
    HADOOP-11868. Invalid user logins trigger large backtraces in server log
    (Chang Li via jlowe)

-    HADOOP-11730. Regression: s3n read failure recovery broken.
-    (Takenori Sato via stevel)
-
-    HADOOP-11802. DomainSocketWatcher thread terminates sometimes after there
-    is an I/O error during requestShortCircuitShm (cmccabe)
+    HADOOP-11872. "hadoop dfs" command prints message about using "yarn jar" on
+    Windows(branch-2 only) (Varun Vasudev via cnauroth)

    HADOOP-11891. OsSecureRandom should lazily fill its reservoir (asuresh)

@@ -1161,9 +1217,6 @@ Release 2.7.1 - 2015-07-06
    HADOOP-11973. Ensure ZkDelegationTokenSecretManager namespace znodes get
    created with ACLs. (Gregory Chanan via asuresh)

-    HADOOP-11934. Use of JavaKeyStoreProvider in LdapGroupsMapping causes
-    infinite loop. (Larry McCay via cnauroth)
-
    HADOOP-12058. Fix dead links to DistCp and Hadoop Archives pages.
    (Kazuho Fujii via aajisaka)

@@ -1240,9 +1293,6 @@ Release 2.7.0 - 2015-04-20
    HADOOP-11301. [optionally] update jmx cache to drop old metrics
    (Maysam Yabandeh via stack)

-    HADOOP-11356. Removed deprecated o.a.h.fs.permission.AccessControlException.
-    (Li Lu via wheat9)
-
    HADOOP-11313. Adding a document about NativeLibraryChecker.
    (Tsuyoshi OZAWA via cnauroth)

@@ -1427,18 +1477,12 @@ Release 2.7.0 - 2015-04-20
    HADOOP-11323. WritableComparator#compare keeps reference to byte array.
    (Wilfred Spiegelenburg via wang)

-    HADOOP-11238. Update the NameNode's Group Cache in the background when
-    possible (Chris Li via Colin P. McCabe)
-
    HADOOP-10809. hadoop-azure: page blob support. (Dexter Bradshaw,
    Mostafa Elhemali, Eric Hanson, and Mike Liddell via cnauroth)

    HADOOP-11188. hadoop-azure: automatically expand page blobs when they become
    full. (Eric Hanson via cnauroth)

-    HADOOP-11506. Configuration variable expansion regex expensive for long
-    values. (Gera Shegalov via gera)
-
    HADOOP-11620. Add support for load balancing across a group of KMS for HA.
    (Arun Suresh via wang)

@@ -1545,9 +1589,6 @@ Release 2.7.0 - 2015-04-20
    HADOOP-11355. When accessing data in HDFS and the key has been deleted,
    a Null Pointer Exception is shown. (Arun Suresh via wang)

-    HADOOP-11343. Overflow is not properly handled in caclulating final iv for
-    AES CTR. (Jerry Chen via wang)
-
    HADOOP-11354. ThrottledInputStream doesn't perform effective throttling.
    (Ted Yu via jing9)

@@ -1563,9 +1604,6 @@ Release 2.7.0 - 2015-04-20
    HADOOP-11369. Fix new findbugs warnings in hadoop-mapreduce-client,
    non-core directories. (Li Lu via wheat9)

-    HADOOP-11368. Fix SSLFactory truststore reloader thread leak in
-    KMSClientProvider. (Arun Suresh via wang)
-
    HADOOP-11372. Fix new findbugs warnings in mapreduce-examples.
    (Li Lu via wheat9)

@@ -1661,9 +1699,6 @@ Release 2.7.0 - 2015-04-20
    HADOOP-11462. TestSocketIOWithTimeout needs change for PowerPC platform.
    (Ayappan via cnauroth)

-    HADOOP-11350. The size of header buffer of HttpServer is too small when
-    HTTPS is enabled. (Benoy Antony via wheat9)
-
    HADOOP-10542 Potential null pointer dereference in Jets3tFileSystemStore
    retrieveBlock(). (Ted Yu via stevel)

@@ -1688,9 +1723,6 @@ Release 2.7.0 - 2015-04-20
    HADOOP-11507 Hadoop RPC Authentication problem with different user locale.
    (Talat UYARER via stevel)

-    HADOOP-11482. Use correct UGI when KMSClientProvider is called by a proxy
-    user. Contributed by Arun Suresh.
-
    HADOOP-11499. Check of executorThreadsStarted in
    ValueQueue#submitRefillTask() evades lock acquisition (Ted Yu via jlowe)

@@ -1759,9 +1791,6 @@ Release 2.7.0 - 2015-04-20
    HADOOP-11570. S3AInputStream.close() downloads the remaining bytes of
    the object from S3. (Dan Hecht via stevel).

-    HADOOP-11295. RPC Server Reader thread can't shutdown if RPCCallQueue is
-    full. (Ming Ma via kihwal)
-
    HADOOP-11599. Client#getTimeout should use IPC_CLIENT_PING_DEFAULT when
    IPC_CLIENT_PING_KEY is not configured. (zhihai xu via ozawa)

@@ -1774,9 +1803,6 @@ Release 2.7.0 - 2015-04-20
    HADOOP-9087. Queue size metric for metric sinks isn't actually maintained
    (Akira AJISAKA via jlowe)

-    HADOOP-11604. Prevent ConcurrentModificationException while closing domain
-    sockets during shutdown of DomainSocketWatcher thread. (cnauroth)
-
    HADOOP-11612. Workaround for Curator's ChildReaper requiring Guava 15+.
    (rkanter)

@@ -1810,9 +1836,6 @@ Release 2.7.0 - 2015-04-20
    HADOOP-11605. FilterFileSystem#create with ChecksumOpt should propagate it
    to wrapped FS. (gera)

-    HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
-    should be non static. (Sean Busbey via yliu)
-
    HADOOP-11670. Regression: s3a auth setup broken. (Adam Budde via stevel)

    HADOOP-11602. Fix toUpperCase/toLowerCase to use Locale.ENGLISH. (ozawa)
@@ -1826,9 +1849,6 @@ Release 2.7.0 - 2015-04-20
    HADOOP-11693. Azure Storage FileSystem rename operations are throttled too
    aggressively to complete HBase WAL archiving. (Duo Xu via cnauroth)

-    HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt
-    synchronization. (Sean Busbey via yliu)
-
    HADOOP-11558. Fix dead links to doc of hadoop-tools. (Jean-Pierre
    Matsumoto via ozawa)

@@ -1904,7 +1924,7 @@ Release 2.6.2 - UNRELEASED

  BUG FIXES

-Release 2.6.1 - UNRELEASED
+Release 2.6.1 - 2015-09-09

  INCOMPATIBLE CHANGES

@@ -1915,11 +1935,20 @@ Release 2.6.1 - UNRELEASED
    HADOOP-7139. Allow appending to existing SequenceFiles
    (kanaka kumar avvaru via vinayakumarb)

+    HADOOP-12280. Skip unit tests based on maven profile rather than
+    NativeCodeLoader.isNativeCodeLoaded (Masatake Iwasaki via Colin P. McCabe)
+
  OPTIMIZATIONS

+    HADOOP-11238. Update the NameNode's Group Cache in the background when
+    possible (Chris Li via Colin P. McCabe)
+
+    HADOOP-11506. Configuration variable expansion regex expensive for long
+    values. (Gera Shegalov via gera)
+
  BUG FIXES

-    HADOOP-11466. FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
+    HADOOP-11466: FastByteComparisons: do not use UNSAFE_COMPARER on the SPARC
    architecture because it is slower there (Suman Somasundar via Colin P.
    McCabe)

@@ -1928,6 +1957,51 @@ Release 2.6.1 - UNRELEASED
    HADOOP-11333. Fix deadlock in DomainSocketWatcher when the notification
    pipe is full (zhaoyunjiong via cmccabe)

+    HADOOP-11343. Overflow is not properly handled in caclulating final iv for
+    AES CTR. (Jerry Chen via wang)
+
+    HADOOP-11368. Fix SSLFactory truststore reloader thread leak in
+    KMSClientProvider. (Arun Suresh via wang)
+
+    HADOOP-11350. The size of header buffer of HttpServer is too small when
+    HTTPS is enabled. (Benoy Antony via wheat9)
+
+    HADOOP-11482. Use correct UGI when KMSClientProvider is called by a proxy
+    user. Contributed by Arun Suresh.
+
+    HADOOP-11295. RPC Server Reader thread can't shutdown if RPCCallQueue is
+    full. (Ming Ma via kihwal)
+
+    HADOOP-11604. Prevent ConcurrentModificationException while closing domain
+    sockets during shutdown of DomainSocketWatcher thread. (cnauroth)
+
+    HADOOP-11674. oneByteBuf in CryptoInputStream and CryptoOutputStream
+    should be non static. (Sean Busbey via yliu)
+
+    HADOOP-11710. Make CryptoOutputStream behave like DFSOutputStream wrt
+    synchronization. (Sean Busbey via yliu)
+
+    HADOOP-11812. Implement listLocatedStatus for ViewFileSystem to speed up
+    split calculation (gera)
+
+    HADOOP-11730. Regression: s3n read failure recovery broken.
+    (Takenori Sato via stevel)
+
+    HADOOP-11802: DomainSocketWatcher thread terminates sometimes after there
+    is an I/O error during requestShortCircuitShm (cmccabe)
+
+    HADOOP-11491. HarFs incorrectly declared as requiring an authority.
+    (Brahma Reddy Battula via gera)
+
+    HADOOP-8151. Error handling in snappy decompressor throws invalid
+    exceptions. (Matt Foley via harsh)
+
+    HADOOP-11932. MetricsSinkAdapter may hang  when being stopped.
+    (Brahma Reddy Battula via jianhe)
+
+    HADOOP-11934. Use of JavaKeyStoreProvider in LdapGroupsMapping causes
+    infinite loop. (Larry McCay via cnauroth)
+
Release 2.6.0 - 2014-11-18

  INCOMPATIBLE CHANGES
@@ -2011,7 +2085,7 @@ Release 2.6.0 - 2014-11-18
    HADOOP-10281. Create a scheduler, which assigns schedulables a priority
    level. (Chris Li via Arpit Agarwal)

-    HADOOP-8944. Shell command fs -count should include human readable option 
+    HADOOP-8944. Shell command fs -count should include human readable option
    (Jonathan Allen via aw)

    HADOOP-10231. Add some components in Native Libraries document (Akira
@@ -5969,7 +6043,6 @@ Release 0.23.1 - 2012-02-17
    HADOOP-7792. Add verifyToken method to AbstractDelegationTokenSecretManager.
    (jitendra)

-
  OPTIMIZATIONS

  BUG FIXES

+ 16 - 1
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties

@@ -152,7 +152,8 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
# NameNode metrics logging.
# The default is to retain two namenode-metrics.log files up to 64MB each.
#
-log4j.logger.NameNodeMetricsLog=INFO,NNMETRICSRFA
+namenode.metrics.logger=INFO,NullAppender
+log4j.logger.NameNodeMetricsLog=${namenode.metrics.logger}
log4j.additivity.NameNodeMetricsLog=false
log4j.appender.NNMETRICSRFA=org.apache.log4j.RollingFileAppender
log4j.appender.NNMETRICSRFA.File=${hadoop.log.dir}/namenode-metrics.log
@@ -161,6 +162,20 @@ log4j.appender.NNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
log4j.appender.NNMETRICSRFA.MaxBackupIndex=1
log4j.appender.NNMETRICSRFA.MaxFileSize=64MB

+#
+# DataNode metrics logging.
+# The default is to retain two datanode-metrics.log files up to 64MB each.
+#
+datanode.metrics.logger=INFO,NullAppender
+log4j.logger.DataNodeMetricsLog=${datanode.metrics.logger}
+log4j.additivity.DataNodeMetricsLog=false
+log4j.appender.DNMETRICSRFA=org.apache.log4j.RollingFileAppender
+log4j.appender.DNMETRICSRFA.File=${hadoop.log.dir}/datanode-metrics.log
+log4j.appender.DNMETRICSRFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.DNMETRICSRFA.layout.ConversionPattern=%d{ISO8601} %m%n
+log4j.appender.DNMETRICSRFA.MaxBackupIndex=1
+log4j.appender.DNMETRICSRFA.MaxFileSize=64MB
+
 #
#
# mapred audit logging
#
+ 10 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -34,7 +34,9 @@ import java.io.Reader;
import java.io.Writer;
import java.lang.ref.WeakReference;
import java.net.InetSocketAddress;
+import java.net.JarURLConnection;
import java.net.URL;
+import java.net.URLConnection;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -2531,7 +2533,14 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    if (url == null) {
      return null;
    }
-    return parse(builder, url.openStream(), url.toString());
+
+    URLConnection connection = url.openConnection();
+    if (connection instanceof JarURLConnection) {
+      // Disable caching for JarURLConnection to avoid sharing JarFile
+      // with other users.
+      connection.setUseCaches(false);
+    }
+    return parse(builder, connection.getInputStream(), url.toString());
  }

  private Document parse(DocumentBuilder builder, InputStream is,
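
Note: the hunk above disables URLConnection caching whenever a configuration resource is read out of a jar, so the backing JarFile is not cached and shared with other users of the same URL. A minimal standalone sketch of the same pattern (the class and method names below are illustrative, not part of the patch):

    import java.io.InputStream;
    import java.net.JarURLConnection;
    import java.net.URL;
    import java.net.URLConnection;

    public class NoJarCacheRead {
      // Open a resource URL without letting a JarURLConnection cache the JarFile.
      static InputStream open(URL url) throws Exception {
        URLConnection connection = url.openConnection();
        if (connection instanceof JarURLConnection) {
          // Caching would keep the JarFile open and share it across callers.
          connection.setUseCaches(false);
        }
        return connection.getInputStream();
      }
    }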

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -381,5 +381,11 @@ public class CommonConfigurationKeysPublic {
      "hadoop.shell.missing.defaultFs.warning";
  public static final boolean HADOOP_SHELL_MISSING_DEFAULT_FS_WARNING_DEFAULT =
      false;
+
+  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
+  public static final String HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES =
+      "hadoop.shell.safely.delete.limit.num.files";
+  public static final long HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT =
+      100;
}


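Note: the new key above backs the "-safely" option added to "hadoop fs -rm" by HADOOP-12358. It is an ordinary Configuration setting, so other code can read it the same way the shell does; a minimal sketch, assuming the shipped default of 100 files (the class name is illustrative):

    import org.apache.hadoop.conf.Configuration;

    public class SafeDeleteLimitExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Falls back to 100 unless overridden, e.g. in core-site.xml.
        long limit = conf.getLong(
            "hadoop.shell.safely.delete.limit.num.files", 100);
        System.out.println("rm -safely prompts above " + limit + " files");
      }
    }
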
+ 7 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java

@@ -742,15 +742,15 @@ public class FileUtil {

    int count;
    byte data[] = new byte[2048];
-    BufferedOutputStream outputStream = new BufferedOutputStream(
-        new FileOutputStream(outputFile));
+    try (BufferedOutputStream outputStream = new BufferedOutputStream(
+        new FileOutputStream(outputFile));) {

-    while ((count = tis.read(data)) != -1) {
-      outputStream.write(data, 0, count);
-    }
+      while ((count = tis.read(data)) != -1) {
+        outputStream.write(data, 0, count);
+      }

-    outputStream.flush();
-    outputStream.close();
+      outputStream.flush();
+    }
  }

  /**
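
Note: the change above converts FileUtil's unpack loop to try-with-resources so the BufferedOutputStream is closed even if the read or write throws. A minimal sketch of the same idiom outside Hadoop (the buffer size mirrors the diff; the class and method names are illustrative):

    import java.io.BufferedOutputStream;
    import java.io.File;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class CopyWithTryWithResources {
      static void copy(InputStream in, File outputFile) throws IOException {
        byte[] data = new byte[2048];
        int count;
        // The output stream is closed automatically, even when an exception is thrown.
        try (BufferedOutputStream out =
                 new BufferedOutputStream(new FileOutputStream(outputFile))) {
          while ((count = in.read(data)) != -1) {
            out.write(data, 0, count);
          }
          out.flush();
        }
      }
    }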

+ 1 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java

@@ -94,8 +94,7 @@ public class Trash extends Configured {
    Trash trash = new Trash(fullyResolvedFs, conf);
    boolean success = trash.moveToTrash(fullyResolvedPath);
    if (success) {
-      System.out.println("Moved: '" + p + "' to trash at: " +
-          trash.getCurrentTrashDir() );
+      LOG.info("Moved: '" + p + "' to trash at: " + trash.getCurrentTrashDir());
    }
    return success;
  }

+ 23 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java

@@ -61,7 +61,8 @@ abstract class CommandWithDestination extends FsCommand {
  private boolean verifyChecksum = true;
  private boolean writeChecksum = true;
  private boolean lazyPersist = false;
-  
+  private boolean direct = false;
+
  /**
   * The name of the raw xattr namespace. It would be nice to use
   * XAttr.RAW.name() but we can't reference the hadoop-hdfs project.
@@ -94,7 +95,11 @@ abstract class CommandWithDestination extends FsCommand {
  protected void setWriteChecksum(boolean flag) {
    writeChecksum = flag;
  }
-  
+
+  protected void setDirectWrite(boolean flag) {
+    direct = flag;
+  }
+
  /**
   * If true, the last modified time, last access time,
   * owner, group and permission information of the source
@@ -372,9 +377,11 @@ abstract class CommandWithDestination extends FsCommand {
  }

  /**
-   * Copies the stream contents to a temporary file.  If the copy is
+   * If direct write is disabled ,copies the stream contents to a temporary
+   * file "<target>._COPYING_". If the copy is
   * successful, the temporary file will be renamed to the real path,
   * else the temporary file will be deleted.
+   * if direct write is enabled , then creation temporary file is skipped.
   * @param in the input stream for the copy
   * @param target where to store the contents of the stream
   * @throws IOException if copy fails
@@ -386,10 +393,12 @@ abstract class CommandWithDestination extends FsCommand {
    }
    TargetFileSystem targetFs = new TargetFileSystem(target.fs);
    try {
-      PathData tempTarget = target.suffix("._COPYING_");
+      PathData tempTarget = direct ? target : target.suffix("._COPYING_");
      targetFs.setWriteChecksum(writeChecksum);
-      targetFs.writeStreamToFile(in, tempTarget, lazyPersist);
-      targetFs.rename(tempTarget, target);
+      targetFs.writeStreamToFile(in, tempTarget, lazyPersist, direct);
+      if (!direct) {
+        targetFs.rename(tempTarget, target);
+      }
    } finally {
      targetFs.close(); // last ditch effort to ensure temp file is removed
    }
@@ -459,10 +468,11 @@ abstract class CommandWithDestination extends FsCommand {
    }

    void writeStreamToFile(InputStream in, PathData target,
-                           boolean lazyPersist) throws IOException {
+        boolean lazyPersist, boolean direct)
+        throws IOException {
      FSDataOutputStream out = null;
      try {
-        out = create(target, lazyPersist);
+        out = create(target, lazyPersist, direct);
        IOUtils.copyBytes(in, out, getConf(), true);
      } finally {
        IOUtils.closeStream(out); // just in case copyBytes didn't
@@ -470,7 +480,8 @@ abstract class CommandWithDestination extends FsCommand {
    }

    // tag created files as temp files
-    FSDataOutputStream create(PathData item, boolean lazyPersist)
+    FSDataOutputStream create(PathData item, boolean lazyPersist,
+        boolean direct)
        throws IOException {
      try {
        if (lazyPersist) {
@@ -488,7 +499,9 @@ abstract class CommandWithDestination extends FsCommand {
          return create(item.path, true);
        }
      } finally { // might have been created but stream was interrupted
-        deleteOnExit(item.path);
+        if (!direct) {
+          deleteOnExit(item.path);
+        }
      }
    }


+ 13 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -133,7 +133,8 @@ class CopyCommands {

  static class Cp extends CommandWithDestination {
    public static final String NAME = "cp";
-    public static final String USAGE = "[-f] [-p | -p[topax]] <src> ... <dst>";
+    public static final String USAGE =
+        "[-f] [-p | -p[topax]] [-d] <src> ... <dst>";
    public static final String DESCRIPTION =
      "Copy files that match the file pattern <src> to a " +
      "destination.  When copying multiple files, the destination " +
@@ -147,13 +148,15 @@ class CopyCommands {
      "if (1) they are supported (HDFS only) and, (2) all of the source and " +
      "target pathnames are in the /.reserved/raw hierarchy. raw namespace " +
      "xattr preservation is determined solely by the presence (or absence) " +
-      "of the /.reserved/raw prefix and not by the -p option.\n";
+        "of the /.reserved/raw prefix and not by the -p option. Passing -d "+
+        "will skip creation of temporary file(<dst>._COPYING_).\n";

    @Override
    protected void processOptions(LinkedList<String> args) throws IOException {
      popPreserveOption(args);
-      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "f");
+      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "f", "d");
      cf.parse(args);
+      setDirectWrite(cf.getOpt("d"));
      setOverwrite(cf.getOpt("f"));
      // should have a -r option
      setRecursive(true);
@@ -215,7 +218,8 @@ class CopyCommands {
   */
  public static class Put extends CommandWithDestination {
    public static final String NAME = "put";
-    public static final String USAGE = "[-f] [-p] [-l] <localsrc> ... <dst>";
+    public static final String USAGE =
+        "[-f] [-p] [-l] [-d] <localsrc> ... <dst>";
    public static final String DESCRIPTION =
      "Copy files from the local file system " +
      "into fs. Copying fails if the file already " +
@@ -225,15 +229,18 @@ class CopyCommands {
      "  -f : Overwrites the destination if it already exists.\n" +
      "  -l : Allow DataNode to lazily persist the file to disk. Forces\n" +
      "       replication factor of 1. This flag will result in reduced\n" +
-      "       durability. Use with care.\n";
+      "       durability. Use with care.\n" +
+        "  -d : Skip creation of temporary file(<dst>._COPYING_).\n";

    @Override
    protected void processOptions(LinkedList<String> args) throws IOException {
-      CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l");
+      CommandFormat cf =
+          new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d");
      cf.parse(args);
      setOverwrite(cf.getOpt("f"));
      setPreserve(cf.getOpt("p"));
      setLazyPersist(cf.getOpt("l"));
+      setDirectWrite(cf.getOpt("d"));
      getRemoteDestination(args);
      // should have a -r option
      setRecursive(true);
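
Note: with the "-d" option wired into both Cp and Put above, a copy can skip the intermediate "<dst>._COPYING_" file. The same flag can also be driven programmatically through FsShell; a hedged sketch (the local and destination paths are placeholders):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class DirectPutExample {
      public static void main(String[] args) throws Exception {
        // -d writes straight to the destination instead of <dst>._COPYING_.
        int rc = ToolRunner.run(new Configuration(), new FsShell(),
            new String[] {"-put", "-d", "local.txt", "/tmp/dst.txt"});
        System.exit(rc);
      }
    }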

+ 53 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java

@@ -25,6 +25,7 @@ import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.PathIOException;
import org.apache.hadoop.fs.PathIsDirectoryException;
@@ -32,9 +33,13 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.PathNotFoundException;
import org.apache.hadoop.fs.Trash;
+import org.apache.hadoop.util.ToolRunner;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT;

/**
- * Classes that delete paths
+ * Classes that delete paths.
 */
@InterfaceAudience.Private
@InterfaceStability.Evolving
@@ -50,28 +55,36 @@ class Delete {
  /** remove non-directory paths */
  public static class Rm extends FsCommand {
    public static final String NAME = "rm";
-    public static final String USAGE = "[-f] [-r|-R] [-skipTrash] <src> ...";
+    public static final String USAGE = "[-f] [-r|-R] [-skipTrash] " +
+        "[-safely] <src> ...";
    public static final String DESCRIPTION =
-      "Delete all files that match the specified file pattern. " +
-      "Equivalent to the Unix command \"rm <src>\"\n" +
-      "-skipTrash: option bypasses trash, if enabled, and immediately " +
-      "deletes <src>\n" +
-      "-f: If the file does not exist, do not display a diagnostic " +
-      "message or modify the exit status to reflect an error.\n" +
-      "-[rR]:  Recursively deletes directories";
+        "Delete all files that match the specified file pattern. " +
+            "Equivalent to the Unix command \"rm <src>\"\n" +
+            "-f: If the file does not exist, do not display a diagnostic " +
+            "message or modify the exit status to reflect an error.\n" +
+            "-[rR]:  Recursively deletes directories.\n" +
+            "-skipTrash: option bypasses trash, if enabled, and immediately " +
+            "deletes <src>.\n" +
+            "-safely: option requires safety confirmation,if enabled, " +
+            "requires confirmation before deleting large directory with more " +
+            "than <hadoop.shell.delete.limit.num.files> files. Delay is " +
+            "expected when walking over large directory recursively to count " +
+            "the number of files to be deleted before the confirmation.\n";
 
 
     private boolean skipTrash = false;
     private boolean skipTrash = false;
     private boolean deleteDirs = false;
     private boolean deleteDirs = false;
     private boolean ignoreFNF = false;
     private boolean ignoreFNF = false;
-    
+    private boolean safeDelete = false;
+
     @Override
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
     protected void processOptions(LinkedList<String> args) throws IOException {
       CommandFormat cf = new CommandFormat(
       CommandFormat cf = new CommandFormat(
-          1, Integer.MAX_VALUE, "f", "r", "R", "skipTrash");
+          1, Integer.MAX_VALUE, "f", "r", "R", "skipTrash", "safely");
       cf.parse(args);
       cf.parse(args);
       ignoreFNF = cf.getOpt("f");
       ignoreFNF = cf.getOpt("f");
       deleteDirs = cf.getOpt("r") || cf.getOpt("R");
       deleteDirs = cf.getOpt("r") || cf.getOpt("R");
       skipTrash = cf.getOpt("skipTrash");
       skipTrash = cf.getOpt("skipTrash");
+      safeDelete = cf.getOpt("safely");
     }
     }
 
 
     @Override
     @Override
@@ -102,7 +115,7 @@ class Delete {
       // problem (ie. creating the trash dir, moving the item to be deleted,
       // etc), then the path will just be deleted because moveToTrash returns
       // false and it falls thru to fs.delete.  this doesn't seem right
-      if (moveToTrash(item)) {
+      if (moveToTrash(item) || !canBeSafelyDeleted(item)) {
         return;
       }
       if (!item.fs.delete(item.path, deleteDirs)) {
@@ -111,6 +124,28 @@ class Delete {
       out.println("Deleted " + item);
       out.println("Deleted " + item);
     }
     }
 
 
+    private boolean canBeSafelyDeleted(PathData item)
+        throws IOException {
+      boolean shouldDelete = true;
+      if (safeDelete) {
+        final long deleteLimit = getConf().getLong(
+            HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES,
+            HADOOP_SHELL_SAFELY_DELETE_LIMIT_NUM_FILES_DEFAULT);
+        if (deleteLimit > 0) {
+          ContentSummary cs = item.fs.getContentSummary(item.path);
+          final long numFiles = cs.getFileCount();
+          if (numFiles > deleteLimit) {
+            if (!ToolRunner.confirmPrompt("Proceed deleting " + numFiles +
+                " files?")) {
+              System.err.println("Delete aborted at user request.\n");
+              shouldDelete = false;
+            }
+          }
+        }
+      }
+      return shouldDelete;
+    }
+
     private boolean moveToTrash(PathData item) throws IOException {
       boolean success = false;
       if (!skipTrash) {
@@ -122,7 +157,7 @@ class Delete {
           String msg = ioe.getMessage();
           if (ioe.getCause() != null) {
             msg += ": " + ioe.getCause().getMessage();
-	  }
+          }
           throw new IOException(msg + ". Consider using -skipTrash option", ioe);
         }
       }
@@ -180,11 +215,14 @@ class Delete {
     }
   }
 
-  /** empty the trash */
+  // delete files from the trash that are older
+  // than the retention threshold.
   static class Expunge extends FsCommand {
     public static final String NAME = "expunge";
     public static final String USAGE = "";
-    public static final String DESCRIPTION = "Empty the Trash";
+    public static final String DESCRIPTION =
+        "Delete files from the trash that are older " +
+            "than the retention threshold";
 
     // TODO: should probably allow path arguments for the filesystems
     @Override
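
The -safely check above only engages when hadoop.shell.safely.delete.limit.num.files resolves to a positive value (see the core-default.xml entry later in this change). A minimal sketch of driving the new flag programmatically through FsShell; the target path and the limit used here are hypothetical, not values taken from the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class SafeRmExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical limit: prompt once a directory holds more than 100 files.
    conf.setLong("hadoop.shell.safely.delete.limit.num.files", 100);
    FsShell shell = new FsShell(conf);
    // "/tmp/big-dir" is a placeholder path; -safely triggers the ContentSummary
    // walk and the confirmation prompt implemented in canBeSafelyDeleted().
    int exitCode = shell.run(new String[] {"-rm", "-r", "-safely", "/tmp/big-dir"});
    System.exit(exitCode);
  }
}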

+ 4 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -1141,4 +1141,8 @@ public class ActiveStandbyElector implements StatCallback, StringCallback {
       ((appData == null) ? "null" : StringUtils.byteToHexString(appData)) + 
       " cb=" + appClient;
   }
+
+  public String getHAZookeeperConnectionState() {
+    return this.zkConnectionState.name();
+  }
 }

+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -63,6 +63,17 @@ public class RetryPolicies {
    */
   public static final RetryPolicy RETRY_FOREVER = new RetryForever();
 
+  /**
+   * <p>
+   * Keep trying forever with a fixed time between attempts.
+   * </p>
+   */
+  public static final RetryPolicy retryForeverWithFixedSleep(long sleepTime,
+      TimeUnit timeUnit) {
+    return new RetryUpToMaximumCountWithFixedSleep(Integer.MAX_VALUE,
+        sleepTime, timeUnit);
+  }
+
   /**
    * <p>
    * Keep trying a limited number of times, waiting a fixed time between attempts,
@@ -151,7 +162,7 @@ public class RetryPolicies {
     return new FailoverOnNetworkExceptionRetry(fallbackPolicy, maxFailovers,
         maxRetries, delayMillis, maxDelayBase);
   }
-  
+
   static class TryOnceThenFail implements RetryPolicy {
     @Override
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
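
For orientation, the new factory plugs into RetryProxy the same way the existing policies do (the test added later in this change does exactly that). A self-contained sketch under that assumption; the Echo interface and its flaky implementation are invented purely for illustration:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryForeverExample {
  /** Illustrative interface; any interface can be wrapped the same way. */
  public interface Echo {
    String echo(String msg) throws Exception;
  }

  public static void main(String[] args) throws Exception {
    Echo flaky = msg -> {
      if (Math.random() < 0.5) {
        throw new Exception("transient failure");  // fails about half the time
      }
      return msg;
    };
    // Each failure sleeps 10 ms and tries again, effectively forever.
    Echo reliable = (Echo) RetryProxy.create(Echo.class, flaky,
        RetryPolicies.retryForeverWithFixedSleep(10, TimeUnit.MILLISECONDS));
    System.out.println(reliable.echo("hello"));
  }
}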

+ 7 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -197,9 +197,10 @@ public class Client {
             clientExecutor.shutdownNow();
           }
         } catch (InterruptedException e) {
-          LOG.error("Interrupted while waiting for clientExecutor" +
-              "to stop", e);
+          LOG.warn("Interrupted while waiting for clientExecutor" +
+              " to stop");
           clientExecutor.shutdownNow();
+          Thread.currentThread().interrupt();
         }
         clientExecutor = null;
       }
@@ -256,6 +257,10 @@ public class Client {
     conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY, timeout);
   }
 
+  @VisibleForTesting
+  public static final ExecutorService getClientExecutor() {
+    return Client.clientExcecutorFactory.clientExecutor;
+  }
   /**
    * Increment this client's reference count
    *

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java

@@ -58,6 +58,9 @@ class MetricsConfig extends SubsetConfiguration {
   static final String PERIOD_KEY = "period";
   static final int PERIOD_DEFAULT = 10; // seconds
 
+  // For testing, this will have the priority.
+  static final String PERIOD_MILLIS_KEY = "periodMillis";
+
   static final String QUEUE_CAPACITY_KEY = "queue.capacity";
   static final int QUEUE_CAPACITY_DEFAULT = 1;
 

+ 8 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java

@@ -61,7 +61,7 @@ class MetricsSourceAdapter implements DynamicMBean {
 
   private Iterable<MetricsRecordImpl> lastRecs;
   private long jmxCacheTS = 0;
-  private int jmxCacheTTL;
+  private long jmxCacheTTL;
   private MBeanInfo infoCache;
   private ObjectName mbeanName;
   private final boolean startMBeans;
@@ -69,7 +69,7 @@ class MetricsSourceAdapter implements DynamicMBean {
   MetricsSourceAdapter(String prefix, String name, String description,
                        MetricsSource source, Iterable<MetricsTag> injectedTags,
                        MetricsFilter recordFilter, MetricsFilter metricFilter,
-                       int jmxCacheTTL, boolean startMBeans) {
+                       long jmxCacheTTL, boolean startMBeans) {
     this.prefix = checkNotNull(prefix, "prefix");
     this.name = checkNotNull(name, "name");
     this.source = checkNotNull(source, "source");
@@ -84,7 +84,7 @@ class MetricsSourceAdapter implements DynamicMBean {
 
   MetricsSourceAdapter(String prefix, String name, String description,
                        MetricsSource source, Iterable<MetricsTag> injectedTags,
-                       int period, MetricsConfig conf) {
+                       long period, MetricsConfig conf) {
     this(prefix, name, description, source, injectedTags,
          conf.getFilter(RECORD_FILTER_KEY),
          conf.getFilter(METRIC_FILTER_KEY),
@@ -229,7 +229,11 @@ class MetricsSourceAdapter implements DynamicMBean {
     return mbeanName;
   }
 
-  
+  @VisibleForTesting
+  long getJmxCacheTTL() {
+    return jmxCacheTTL;
+  }
+
   private void updateInfoCache() {
     LOG.debug("Updating info cache...");
     infoCache = infoBuilder.reset(lastRecs).get();

+ 12 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java

@@ -105,7 +105,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
   private Map<String, MetricsConfig> sourceConfigs, sinkConfigs;
   private boolean monitoring = false;
   private Timer timer;
-  private int period; // seconds
+  private long period; // milliseconds
   private long logicalTime; // number of timer invocations * period
   private ObjectName mbeanName;
   private boolean publishSelfMetrics = true;
@@ -359,7 +359,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
       return;
     }
     logicalTime = 0;
-    long millis = period * 1000;
+    long millis = period;
     timer = new Timer("Timer for '"+ prefix +"' metrics system", true);
     timer.scheduleAtFixedRate(new TimerTask() {
           @Override
@@ -371,7 +371,7 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
             }
           }
         }, millis, millis);
-    LOG.info("Scheduled snapshot period at "+ period +" second(s).");
+    LOG.info("Scheduled snapshot period at "+ (period/1000) +" second(s).");
   }
 
   synchronized void onTimerEvent() {
@@ -485,12 +485,15 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
 
   private synchronized void configureSinks() {
     sinkConfigs = config.getInstanceConfigs(SINK_KEY);
-    int confPeriod = 0;
+    long confPeriodMillis = 0;
     for (Entry<String, MetricsConfig> entry : sinkConfigs.entrySet()) {
       MetricsConfig conf = entry.getValue();
       int sinkPeriod = conf.getInt(PERIOD_KEY, PERIOD_DEFAULT);
-      confPeriod = confPeriod == 0 ? sinkPeriod
-                                   : ArithmeticUtils.gcd(confPeriod, sinkPeriod);
+      // Support configuring periodMillis for testing.
+      long sinkPeriodMillis =
+          conf.getLong(PERIOD_MILLIS_KEY, sinkPeriod * 1000);
+      confPeriodMillis = confPeriodMillis == 0 ? sinkPeriodMillis
+          : ArithmeticUtils.gcd(confPeriodMillis, sinkPeriodMillis);
       String clsName = conf.getClassName("");
       if (clsName == null) continue;  // sink can be registered later on
       String sinkName = entry.getKey();
@@ -503,8 +506,9 @@ public class MetricsSystemImpl extends MetricsSystem implements MetricsSource {
         LOG.warn("Error creating sink '"+ sinkName +"'", e);
         LOG.warn("Error creating sink '"+ sinkName +"'", e);
       }
       }
     }
     }
-    period = confPeriod > 0 ? confPeriod
-                            : config.getInt(PERIOD_KEY, PERIOD_DEFAULT);
+    long periodSec = config.getInt(PERIOD_KEY, PERIOD_DEFAULT);
+    period = confPeriodMillis > 0 ? confPeriodMillis
+        : config.getLong(PERIOD_MILLIS_KEY, periodSec * 1000);
   }
   }
 
 
   static MetricsSinkAdapter newSink(String name, String desc, MetricsSink sink,
   static MetricsSinkAdapter newSink(String name, String desc, MetricsSink sink,

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/package-info.java

@@ -74,7 +74,9 @@
     </dd>
     <dt> <code>org.apache.hadoop.metrics2.sink</code></dt>
     <dd>Builtin metrics sink implementations including the
-      {@link org.apache.hadoop.metrics2.sink.FileSink}.
+      {@link org.apache.hadoop.metrics2.sink.FileSink},
+      {@link org.apache.hadoop.metrics2.sink.GraphiteSink}, and
+      {@link org.apache.hadoop.metrics2.sink.StatsDSink}.
     </dd>
     <dt> <code>org.apache.hadoop.metrics2.util</code></dt>
     <dd>General utilities for implementing metrics sinks etc., including the

+ 218 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/StatsDSink.java

@@ -0,0 +1,218 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.sink;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
+import java.net.InetSocketAddress;
+import java.nio.charset.StandardCharsets;
+
+import org.apache.commons.configuration.SubsetConfiguration;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricType;
+import org.apache.hadoop.metrics2.MetricsException;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsSink;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.impl.MsInfo;
+import org.apache.hadoop.net.NetUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * A metrics sink that writes metrics to a StatsD daemon.
+ * This sink will produce metrics of the form
+ * '[hostname].servicename.context.name.metricname:value|type'
+ * where hostname is optional. This is useful when sending to
+ * a daemon that is running on the localhost and will add the
+ * hostname to the metric (such as the
+ * <a href="https://collectd.org/">CollectD</a> StatsD plugin).
+ * <br/>
+ * To configure this plugin, you will need to add the following
+ * entries to your hadoop-metrics2.properties file:
+ * <br/>
+ * <pre>
+ * *.sink.statsd.class=org.apache.hadoop.metrics2.sink.StatsDSink
+ * [prefix].sink.statsd.server.host=
+ * [prefix].sink.statsd.server.port=
+ * [prefix].sink.statsd.skip.hostname=true|false (optional)
+ * [prefix].sink.statsd.service.name=NameNode (name you want for service)
+ * </pre>
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class StatsDSink implements MetricsSink, Closeable {
+  private static final Logger LOG = LoggerFactory.getLogger(StatsDSink.class);
+  private static final String PERIOD = ".";
+  private static final String SERVER_HOST_KEY = "server.host";
+  private static final String SERVER_PORT_KEY = "server.port";
+  private static final String HOST_NAME_KEY = "host.name";
+  private static final String SERVICE_NAME_KEY = "service.name";
+  private static final String SKIP_HOSTNAME_KEY = "skip.hostname";
+  private boolean skipHostname = false;
+  private String hostName = null;
+  private String serviceName = null;
+  private StatsD statsd = null;
+
+  @Override
+  public void init(SubsetConfiguration conf) {
+    // Get StatsD host configurations.
+    final String serverHost = conf.getString(SERVER_HOST_KEY);
+    final int serverPort = Integer.parseInt(conf.getString(SERVER_PORT_KEY));
+
+    skipHostname = conf.getBoolean(SKIP_HOSTNAME_KEY, false);
+    if (!skipHostname) {
+      hostName = conf.getString(HOST_NAME_KEY, null);
+      if (null == hostName) {
+        hostName = NetUtils.getHostname();
+      }
+    }
+
+    serviceName = conf.getString(SERVICE_NAME_KEY, null);
+
+    statsd = new StatsD(serverHost, serverPort);
+  }
+
+  @Override
+  public void putMetrics(MetricsRecord record) {
+
+    String hn = hostName;
+    String ctx = record.context();
+    String sn = serviceName;
+
+    for (MetricsTag tag : record.tags()) {
+      if (tag.info().name().equals(MsInfo.Hostname.name())
+          && tag.value() != null) {
+        hn = tag.value();
+      } else if (tag.info().name().equals(MsInfo.Context.name())
+          && tag.value() != null) {
+        ctx = tag.value();
+      } else if (tag.info().name().equals(MsInfo.ProcessName.name())
+          && tag.value() != null) {
+        sn = tag.value();
+      }
+    }
+
+    StringBuilder buf = new StringBuilder();
+    if (!skipHostname && hn != null) {
+      int idx = hn.indexOf(".");
+      if (idx == -1) {
+        buf.append(hn).append(PERIOD);
+      } else {
+        buf.append(hn.substring(0, idx)).append(PERIOD);
+      }
+    }
+    buf.append(sn).append(PERIOD);
+    buf.append(ctx).append(PERIOD);
+    buf.append(record.name().replaceAll("\\.", "-")).append(PERIOD);
+
+    // Collect datapoints.
+    for (AbstractMetric metric : record.metrics()) {
+      String type = null;
+      if (metric.type().equals(MetricType.COUNTER)) {
+        type = "c";
+      } else if (metric.type().equals(MetricType.GAUGE)) {
+        type = "g";
+      }
+      StringBuilder line = new StringBuilder();
+      line.append(buf.toString())
+          .append(metric.name().replace(' ', '_'))
+          .append(":")
+          .append(metric.value())
+          .append("|")
+          .append(type);
+      writeMetric(line.toString());
+    }
+
+  }
+
+  public void writeMetric(String line) {
+    try {
+      statsd.write(line);
+    } catch (IOException e) {
+      LOG.warn("Error sending metrics to StatsD", e);
+      throw new MetricsException("Error writing metric to StatsD", e);
+    }
+  }
+
+  @Override
+  public void flush() {
+  }
+
+  @Override
+  public void close() throws IOException {
+    statsd.close();
+  }
+
+  /**
+   * Class that sends UDP packets to StatsD daemon.
+   *
+   */
+  public static class StatsD {
+
+    private DatagramSocket socket = null;
+    private DatagramPacket packet = null;
+    private String serverHost;
+    private int serverPort;
+
+    public StatsD(String serverHost, int serverPort) {
+      this.serverHost = serverHost;
+      this.serverPort = serverPort;
+    }
+
+    public void createSocket() throws IOException {
+      try {
+        InetSocketAddress address =
+            new InetSocketAddress(this.serverHost, this.serverPort);
+        socket = new DatagramSocket();
+        packet =
+            new DatagramPacket("".getBytes(StandardCharsets.UTF_8), 0, 0,
+                address.getAddress(), this.serverPort);
+      } catch (IOException ioe) {
+        throw NetUtils.wrapException(this.serverHost, this.serverPort,
+            "localhost", 0, ioe);
+      }
+    }
+
+    public void write(String msg) throws IOException {
+      if (null == socket) {
+        createSocket();
+      }
+      LOG.debug("Sending metric: {}", msg);
+      packet.setData(msg.getBytes(StandardCharsets.UTF_8));
+      socket.send(packet);
+    }
+
+    public void close() throws IOException {
+      try {
+        if (socket != null) {
+          socket.close();
+        }
+      } finally {
+        socket = null;
+      }
+    }
+
+  }
+
+}
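
As a rough illustration of the wire format documented in the class javadoc above, the nested StatsD helper can be exercised directly. The endpoint and the metric line below are made-up example values, not defaults shipped with the sink:

import java.io.IOException;
import org.apache.hadoop.metrics2.sink.StatsDSink;

public class StatsDLineExample {
  public static void main(String[] args) throws IOException {
    // Hypothetical endpoint; a real deployment points this at its StatsD daemon.
    StatsDSink.StatsD statsd = new StatsDSink.StatsD("127.0.0.1", 8125);
    try {
      // One gauge in the '[hostname].servicename.context.name.metricname:value|type' form.
      statsd.write("host1.NameNode.dfs.FSNamesystem.FilesTotal:42|g");
    } finally {
      statsd.close();
    }
  }
}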

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java

@@ -105,7 +105,7 @@ public class SaslPlainServer implements SaslServer {
         authz = ac.getAuthorizedID();
       }
     } catch (Exception e) {
-      throw new SaslException("PLAIN auth failed: " + e.getMessage(), e);
+      throw new SaslException("PLAIN auth failed: " + e.toString(), e);
     } finally {
       completed = true;
     }

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/AccessControlList.java

@@ -230,7 +230,7 @@ public class AccessControlList implements Writable {
   public final boolean isUserInList(UserGroupInformation ugi) {
     if (allAllowed || users.contains(ugi.getShortUserName())) {
       return true;
-    } else {
+    } else if (!groups.isEmpty()) {
       for(String group: ugi.getGroupNames()) {
         if (groups.contains(group)) {
           return true;

+ 10 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LineReader.java

@@ -303,7 +303,10 @@ public class LineReader implements Closeable {
         startPosn = bufferPosn = 0;
         bufferLength = fillBuffer(in, buffer, ambiguousByteCount > 0);
         if (bufferLength <= 0) {
-          str.append(recordDelimiterBytes, 0, ambiguousByteCount);
+          if (ambiguousByteCount > 0) {
+            str.append(recordDelimiterBytes, 0, ambiguousByteCount);
+            bytesConsumed += ambiguousByteCount;
+          }
           break; // EOF
         }
       }
@@ -325,13 +328,13 @@ public class LineReader implements Closeable {
       if (appendLength > maxLineLength - txtLength) {
         appendLength = maxLineLength - txtLength;
       }
+      bytesConsumed += ambiguousByteCount;
+      if (appendLength >= 0 && ambiguousByteCount > 0) {
+        //appending the ambiguous characters (refer case 2.2)
+        str.append(recordDelimiterBytes, 0, ambiguousByteCount);
+        ambiguousByteCount = 0;
+      }
       if (appendLength > 0) {
-        if (ambiguousByteCount > 0) {
-          str.append(recordDelimiterBytes, 0, ambiguousByteCount);
-          //appending the ambiguous characters (refer case 2.2)
-          bytesConsumed += ambiguousByteCount;
-          ambiguousByteCount=0;
-        }
         str.append(buffer, startPosn, appendLength);
         txtLength += appendLength;
       }

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java

@@ -86,7 +86,7 @@ public class VersionInfo {
   }
 
   protected String _getBuildVersion(){
-    return getVersion() +
+    return _getVersion() +
       " from " + _getRevision() +
       " by " + _getUser() +
       " source checksum " + _getSrcChecksum();

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml

@@ -1962,4 +1962,15 @@ for ldap providers in the same way as above does.
     <name>hadoop.shell.missing.defaultFs.warning</name>
     <value>false</value>
   </property>
+
+  <property>
+    <name>hadoop.shell.safely.delete.limit.num.files</name>
+    <value>100</value>
+    <description>Used by -safely option of hadoop fs shell -rm command to avoid
+      accidental deletion of large directories. When enabled, the -rm command
+      requires confirmation if the number of files to be deleted is greater than
+      this limit.  The default limit is 100 files. The warning is disabled if
+      the limit is 0 or the -safely is not specified in -rm command.
+    </description>
+  </property>
 </configuration>

+ 1 - 1
hadoop-common-project/hadoop-common/src/site/markdown/FileSystemShell.md

@@ -271,7 +271,7 @@ expunge
 
 Usage: `hadoop fs -expunge`
 
-Empty the Trash. Refer to the [HDFS Architecture Guide](../hadoop-hdfs/HdfsDesign.html) for more information on the Trash feature.
+If trash is enabled when a file is deleted, HDFS instead moves the deleted file to a trash directory. This command causes HDFS to permanently delete files from the trash that are older than the retention threshold. Refer to the [File Deletes and Undeletes Guide](../hadoop-hdfs/HdfsDesign.html#File_Deletes_and_Undeletes) in Space Reclamation section for more information on the Trash feature.
 
 find
 ----

+ 3 - 0
hadoop-common-project/hadoop-common/src/site/markdown/Metrics.md

@@ -192,6 +192,8 @@ Each metrics record contains tags such as ProcessName, SessionId, and Hostname a
 | `PutImageNumOps` | Total number of fsimage uploads to SecondaryNameNode |
 | `PutImageAvgTime` | Average fsimage upload time in milliseconds |
 | `TotalFileOps`| Total number of file operations performed |
+| `NNStarted`| NameNode start time |
+| `NNStartedTimeInMillis`| NameNode start time in milliseconds |
 
 FSNamesystem
 ------------
@@ -273,6 +275,7 @@ The server-side metrics for a journal from the JournalNode's perspective. Each m
 | `CurrentLagTxns` | The number of transactions that this JournalNode is lagging |
 | `LastWrittenTxId` | The highest transaction id stored on this JournalNode |
 | `LastPromisedEpoch` | The last epoch number which this node has promised not to accept any lower epoch, or 0 if no promises have been made |
+| `LastJournalTimestamp` | The timestamp of last successfully written transaction |
 
 datanode
 --------

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/TestCLI.java

@@ -42,7 +42,7 @@ public class TestCLI extends CLITestHelper {
 
   @Override
   protected CommandExecutor.Result execute(CLICommand cmd) throws Exception {
-    return cmd.getExecutor("").executeCommand(cmd.getCmd());
+    return cmd.getExecutor("", conf).executeCommand(cmd.getCmd());
 
   }
   

+ 4 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLICommand.java

@@ -17,11 +17,14 @@
  */
 package org.apache.hadoop.cli.util;
 
+import org.apache.hadoop.conf.Configuration;
+
 /**
  * This interface is to generalize types of test command for upstream projects
  */
 public interface CLICommand {
-  public CommandExecutor getExecutor(String tag) throws IllegalArgumentException;
+  public CommandExecutor getExecutor(String tag, Configuration conf)
+      throws IllegalArgumentException;
   public CLICommandTypes getType();
   public String getCmd();
   @Override

+ 4 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CLITestCmd.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.cli.util;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FsShell;
 
 /**
@@ -32,9 +33,10 @@ public class CLITestCmd implements CLICommand {
   }
 
   @Override
-  public CommandExecutor getExecutor(String tag) throws IllegalArgumentException {
+  public CommandExecutor getExecutor(String tag, Configuration conf)
+      throws IllegalArgumentException {
     if (getType() instanceof CLICommandFS)
-      return new FSCmdExecutor(tag, new FsShell());
+      return new FSCmdExecutor(tag, new FsShell(conf));
     throw new
         IllegalArgumentException("Unknown type of test command: " + getType());
   }

+ 46 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellCopy.java

@@ -485,6 +485,52 @@ public class TestFsShellCopy {
     checkPath(dstPath, false);
   }
   
+  @Test
+  public void testDirectCopy() throws Exception {
+    Path testRoot = new Path(testRootDir, "testPutFile");
+    lfs.delete(testRoot, true);
+    lfs.mkdirs(testRoot);
+
+    Path target_COPYING_File = new Path(testRoot, "target._COPYING_");
+    Path target_File = new Path(testRoot, "target");
+    Path srcFile = new Path(testRoot, new Path("srcFile"));
+    lfs.createNewFile(srcFile);
+
+    // If direct write is false , then creation of "file1" ,will delete file
+    // (file1._COPYING_) if already exist.
+    checkDirectCopy(srcFile, target_File, target_COPYING_File, false);
+    shell.run(new String[] { "-rm", target_File.toString() });
+
+    // If direct write is true , then creation of "file1", will not create a
+    // temporary file and will not delete (file1._COPYING_) if already exist.
+    checkDirectCopy(srcFile, target_File, target_COPYING_File, true);
+  }
+
+  private void checkDirectCopy(Path srcFile, Path target_File,
+      Path target_COPYING_File,boolean direct) throws Exception {
+    int directWriteExitCode = direct ? 0 : 1;
+    shell
+        .run(new String[] { "-copyFromLocal", srcFile.toString(),
+        target_COPYING_File.toString() });
+    int srcFileexist = shell
+        .run(new String[] { "-cat", target_COPYING_File.toString() });
+    assertEquals(0, srcFileexist);
+
+    if (!direct) {
+      shell.run(new String[] { "-copyFromLocal", srcFile.toString(),
+          target_File.toString() });
+    } else {
+      shell.run(new String[] { "-copyFromLocal", "-d", srcFile.toString(),
+          target_File.toString() });
+    }
+    // cat of "target._COPYING_" will return exitcode :
+    // as 1(file does not exist), if direct write is false.
+    // as 0, if direct write is true.
+    srcFileexist = shell.run(new String[] { "-cat",
+        target_COPYING_File.toString() });
+    assertEquals(directWriteExitCode, srcFileexist);
+  }
+
   private void createFile(Path ... paths) throws IOException {
   private void createFile(Path ... paths) throws IOException {
     for (Path path : paths) {
       FSDataOutputStream out = lfs.create(path);
+ 12 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java

@@ -25,6 +25,7 @@ import static org.apache.hadoop.io.retry.RetryPolicies.retryByRemoteException;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithFixedSleep;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumCountWithProportionalSleep;
 import static org.apache.hadoop.io.retry.RetryPolicies.retryUpToMaximumTimeWithFixedSleep;
+import static org.apache.hadoop.io.retry.RetryPolicies.retryForeverWithFixedSleep;
 import static org.apache.hadoop.io.retry.RetryPolicies.exponentialBackoffRetry;
 import static org.junit.Assert.*;
 
     unreliable.failsOnceThenSucceeds();
     unreliable.failsOnceThenSucceeds();
     unreliable.failsTenTimesThenSucceeds();
   }
+
+  @Test
+  public void testRetryForeverWithFixedSleep() throws UnreliableException {
+    UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(
+        UnreliableInterface.class, unreliableImpl,
+        retryForeverWithFixedSleep(1, TimeUnit.MILLISECONDS));
+    unreliable.alwaysSucceeds();
+    unreliable.failsOnceThenSucceeds();
+    unreliable.failsTenTimesThenSucceeds();
+  }
+
   @Test
   public void testRetryUpToMaximumCountWithFixedSleep() throws UnreliableException {
     UnreliableInterface unreliable = (UnreliableInterface)

+ 37 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java

@@ -304,6 +304,8 @@ public class TestIPC {
       String causeText=cause.getMessage();
       assertTrue("Did not find " + causeText + " in " + message,
               message.contains(causeText));
+    } finally {
+      client.stop();
     }
   }
   
@@ -416,6 +418,7 @@ public class TestIPC {
       client.call(param, addr, null, null, 0, conf);
       
     } finally {
+      client.stop();
       server.stop();
     }
   }
@@ -531,6 +534,8 @@ public class TestIPC {
       fail("Expected an exception to have been thrown");
       fail("Expected an exception to have been thrown");
     } catch (IOException e) {
     } catch (IOException e) {
       assertTrue(e.getMessage().contains("Injected fault"));
       assertTrue(e.getMessage().contains("Injected fault"));
+    } finally {
+      client.stop();
     }
     }
   }
   }
 
 
@@ -556,11 +561,11 @@ public class TestIPC {
     }).when(spyFactory).createSocket();
       
     Server server = new TestServer(1, true);
+    Client client = new Client(LongWritable.class, conf, spyFactory);
     server.start();
     try {
       // Call should fail due to injected exception.
       InetSocketAddress address = NetUtils.getConnectAddress(server);
-      Client client = new Client(LongWritable.class, conf, spyFactory);
       try {
         client.call(new LongWritable(RANDOM.nextLong()),
                 address, null, null, 0, conf);
@@ -577,6 +582,7 @@ public class TestIPC {
       client.call(new LongWritable(RANDOM.nextLong()),
           address, null, null, 0, conf);
     } finally {
+      client.stop();
       server.stop();
     }
   }
@@ -601,6 +607,7 @@ public class TestIPC {
     // set timeout to be bigger than 3*ping interval
     client.call(new LongWritable(RANDOM.nextLong()),
         addr, null, null, 3*PING_INTERVAL+MIN_SLEEP_TIME, conf);
+    client.stop();
   }
 
   @Test(timeout=60000)
@@ -621,6 +628,7 @@ public class TestIPC {
     } catch (SocketTimeoutException e) {
       LOG.info("Get a SocketTimeoutException ", e);
     }
+    client.stop();
   }
   
   /**
@@ -851,6 +859,8 @@ public class TestIPC {
             } catch (IOException e) {
               LOG.error(e);
             } catch (InterruptedException e) {
+            } finally {
+              client.stop();
             }
           }
         });
@@ -952,6 +962,31 @@ public class TestIPC {
         endFds - startFds < 20);
   }
   
+  /**
+   * Check if Client is interrupted after handling
+   * InterruptedException during cleanup
+   */
+  @Test(timeout=30000)
+  public void testInterrupted() {
+    Client client = new Client(LongWritable.class, conf);
+    client.getClientExecutor().submit(new Runnable() {
+      public void run() {
+        while(true);
+      }
+    });
+    Thread.currentThread().interrupt();
+    client.stop();
+    try {
+      assertTrue(Thread.currentThread().isInterrupted());
+      LOG.info("Expected thread interrupt during client cleanup");
+    } catch (AssertionError e) {
+      LOG.error("The Client did not interrupt after handling an Interrupted Exception");
+      Assert.fail("The Client did not interrupt after handling an Interrupted Exception");
+    }
+    // Clear Thread interrupt
+    Thread.currentThread().interrupted();
+  }
+
   private long countOpenFileDescriptors() {
     return FD_DIR.list().length;
   }
@@ -1315,6 +1350,7 @@ public class TestIPC {
       Mockito.verify(mockFactory, Mockito.times(maxTimeoutRetries))
           .createSocket();
     }
+    client.stop();
   }
   
   private void doIpcVersionTest(

+ 8 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestSaslRPC.java

@@ -558,9 +558,16 @@ public class TestSaslRPC {
       e = se;
     }
     assertNotNull(e);
-    assertEquals("PLAIN auth failed: wrong password", e.getMessage());
+    String message = e.getMessage();
+    assertContains("PLAIN auth failed", message);
+    assertContains("wrong password", message);
   }
 
+  private void assertContains(String expected, String text) {
+    assertNotNull("null text", text );
+    assertTrue("No {" + expected + "} in {" + text + "}",
+        text.contains(expected));
+  }
 
   private void runNegotiation(CallbackHandler clientCbh,
                               CallbackHandler serverCbh)

+ 13 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestMetricsSystemImpl.java

@@ -544,6 +544,19 @@ public class TestMetricsSystemImpl {
     }
   }
 
+  @Test
+  public void testRegisterSourceJmxCacheTTL() {
+    MetricsSystem ms = new MetricsSystemImpl();
+    ms.init("TestMetricsSystem");
+    TestSource ts = new TestSource("ts");
+    ms.register(ts);
+    MetricsSourceAdapter sa = ((MetricsSystemImpl) ms)
+        .getSourceAdapter("TestSource");
+    assertEquals(MetricsConfig.PERIOD_DEFAULT * 1000 + 1,
+        sa.getJmxCacheTTL());
+    ms.shutdown();
+  }
+
   @Metrics(context="test")
   @Metrics(context="test")
   private static class TestSource {
   private static class TestSource {
     @Metric("C1 desc") MutableCounterLong c1;
     @Metric("C1 desc") MutableCounterLong c1;

+ 122 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestStatsDMetrics.java

@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.metrics2.impl;
+
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.net.DatagramPacket;
+import java.net.DatagramSocket;
+import java.nio.charset.Charset;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.metrics2.AbstractMetric;
+import org.apache.hadoop.metrics2.MetricType;
+import org.apache.hadoop.metrics2.MetricsRecord;
+import org.apache.hadoop.metrics2.MetricsTag;
+import org.apache.hadoop.metrics2.sink.StatsDSink;
+import org.apache.hadoop.metrics2.sink.StatsDSink.StatsD;
+import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
+
+public class TestStatsDMetrics {
+
+  private AbstractMetric makeMetric(String name, Number value,
+      MetricType type) {
+    AbstractMetric metric = mock(AbstractMetric.class);
+    when(metric.name()).thenReturn(name);
+    when(metric.value()).thenReturn(value);
+    when(metric.type()).thenReturn(type);
+    return metric;
+  }
+
+  @Test(timeout=3000)
+  public void testPutMetrics() throws IOException, InterruptedException {
+    final StatsDSink sink = new StatsDSink();
+    List<MetricsTag> tags = new ArrayList<MetricsTag>();
+    tags.add(new MetricsTag(MsInfo.Hostname, "host"));
+    tags.add(new MetricsTag(MsInfo.Context, "jvm"));
+    tags.add(new MetricsTag(MsInfo.ProcessName, "process"));
+    Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
+    metrics.add(makeMetric("foo1", 1.25, MetricType.COUNTER));
+    metrics.add(makeMetric("foo2", 2.25, MetricType.GAUGE));
+    final MetricsRecord record =
+        new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
+
+    try (DatagramSocket sock = new DatagramSocket()) {
+      sock.setReceiveBufferSize(8192);
+      final StatsDSink.StatsD mockStatsD =
+          new StatsD(sock.getLocalAddress().getHostName(),
+              sock.getLocalPort());
+      Whitebox.setInternalState(sink, "statsd", mockStatsD);
+      final DatagramPacket p = new DatagramPacket(new byte[8192], 8192);
+      sink.putMetrics(record);
+      sock.receive(p);
+
+      String result =new String(p.getData(), 0, p.getLength(),
+          Charset.forName("UTF-8"));
+      assertTrue(
+          "Received data did not match data sent",
+          result.equals("host.process.jvm.Context.foo1:1.25|c") ||
+          result.equals("host.process.jvm.Context.foo2:2.25|g"));
+
+    } finally {
+      sink.close();
+    }
+  }
+
+  @Test(timeout=3000)
+  public void testPutMetrics2() throws IOException {
+    StatsDSink sink = new StatsDSink();
+    List<MetricsTag> tags = new ArrayList<MetricsTag>();
+    tags.add(new MetricsTag(MsInfo.Hostname, null));
+    tags.add(new MetricsTag(MsInfo.Context, "jvm"));
+    tags.add(new MetricsTag(MsInfo.ProcessName, "process"));
+    Set<AbstractMetric> metrics = new HashSet<AbstractMetric>();
+    metrics.add(makeMetric("foo1", 1, MetricType.COUNTER));
+    metrics.add(makeMetric("foo2", 2, MetricType.GAUGE));
+    MetricsRecord record =
+        new MetricsRecordImpl(MsInfo.Context, (long) 10000, tags, metrics);
+
+    try (DatagramSocket sock = new DatagramSocket()) {
+      sock.setReceiveBufferSize(8192);
+      final StatsDSink.StatsD mockStatsD =
+          new StatsD(sock.getLocalAddress().getHostName(),
+              sock.getLocalPort());
+      Whitebox.setInternalState(sink, "statsd", mockStatsD);
+      final DatagramPacket p = new DatagramPacket(new byte[8192], 8192);
+      sink.putMetrics(record);
+      sock.receive(p);
+      String result =
+          new String(p.getData(), 0, p.getLength(), Charset.forName("UTF-8"));
+
+      assertTrue("Received data did not match data sent",
+          result.equals("process.jvm.Context.foo1:1|c") ||
+          result.equals("process.jvm.Context.foo2:2|g"));
+    } finally {
+      sink.close();
+    }
+  }
+
+}

+ 9 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/authorize/TestAccessControlList.java

@@ -37,6 +37,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.NativeCodeLoader;
 import org.junit.Test;
 
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.spy;
+import static org.mockito.Mockito.verify;
+
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public class TestAccessControlList {
@@ -449,6 +453,11 @@ public class TestAccessControlList {
     assertUserAllowed(susan, acl);
     assertUserAllowed(barbara, acl);
     assertUserAllowed(ian, acl);
+
+    acl = new AccessControlList("");
+    UserGroupInformation spyUser = spy(drwho);
+    acl.isUserAllowed(spyUser);
+    verify(spyUser, never()).getGroupNames();
   }
 
   private void assertUserAllowed(UserGroupInformation ugi,

+ 1 - 6
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java

@@ -176,13 +176,8 @@ public class TestWebDelegationToken {
 
   protected Server createJettyServer() {
     try {
-      InetAddress localhost = InetAddress.getLocalHost();
-      ServerSocket ss = new ServerSocket(0, 50, localhost);
-      int port = ss.getLocalPort();
-      ss.close();
       jetty = new Server(0);
       jetty.getConnectors()[0].setHost("localhost");
-      jetty.getConnectors()[0].setPort(port);
       return jetty;
     } catch (Exception ex) {
       throw new RuntimeException("Could not setup Jetty: " + ex.getMessage(),
@@ -192,7 +187,7 @@ public class TestWebDelegationToken {
 
   protected String getJettyURL() {
     Connector c = jetty.getConnectors()[0];
-    return "http://" + c.getHost() + ":" + c.getPort();
+    return "http://" + c.getHost() + ":" + c.getLocalPort();
   }
 
   @Before

+ 15 - 7
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -336,7 +336,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-cp \[-f\] \[-p \| -p\[topax\]\] &lt;src&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
+          <expected-output>^-cp \[-f\] \[-p \| -p\[topax\]\] \[-d\] &lt;src&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -376,7 +376,11 @@
         </comparator>
         <comparator>
             <type>RegexpComparator</type>
-            <expected-output>^\s*\(or absence\) of the \/\.reserved\/raw prefix and not by the -p option.( )*</expected-output>
+          <expected-output>^\s*\(or absence\) of the \/\.reserved\/raw prefix and not by the -p option\. Passing -d( )*</expected-output>
+        </comparator>
+        <comparator>
+            <type>RegexpComparator</type>
+            <expected-output>^\s*will skip creation of temporary file\(&lt;dst&gt;\._COPYING_\)\.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -391,7 +395,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rm \[-f\] \[-r\|-R\] \[-skipTrash\] &lt;src&gt; \.\.\. :\s*</expected-output>
+          <expected-output>^-rm \[-f\] \[-r\|-R\] \[-skipTrash\] \[-safely\] &lt;src&gt; \.\.\. :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -403,7 +407,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*-skipTrash\s+option bypasses trash, if enabled, and immediately deletes &lt;src&gt;( )*</expected-output>
+          <expected-output>^\s*-skipTrash\s+option bypasses trash, if enabled, and immediately deletes &lt;src&gt;\.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -415,7 +419,7 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s+-\[rR\]\s+Recursively deletes directories\s*</expected-output>
+          <expected-output>^\s+-\[rR\]\s+Recursively deletes directories\.\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -472,7 +476,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-put \[-f\] \[-p\] \[-l\] &lt;localsrc&gt; \.\.\. &lt;dst&gt; :( )*</expected-output>
+          <expected-output>^-put \[-f\] \[-p\] \[-l\] \[-d\] &lt;localsrc&gt; \.\.\. &lt;dst&gt; :( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -506,6 +510,10 @@
           <type>RegexpComparator</type>
           <type>RegexpComparator</type>
           <expected-output>^\s*durability. Use with care.( )*</expected-output>
           <expected-output>^\s*durability. Use with care.( )*</expected-output>
         </comparator>
         </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-d  Skip creation of temporary file\(&lt;dst&gt;\._COPYING_\).( )*</expected-output>
+        </comparator>
       </comparators>
     </test>

@@ -519,7 +527,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-copyFromLocal \[-f\] \[-p\] \[-l\] &lt;localsrc&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
+          <expected-output>^-copyFromLocal \[-f\] \[-p\] \[-l\] \[-d\] &lt;localsrc&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml

@@ -14,6 +14,7 @@
       <Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport$DiffReportEntry"/>
       <Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport$DiffReportEntry"/>
       <Class name="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus"/>
       <Class name="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus"/>
       <Class name="org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport"/>
       <Class name="org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport"/>
+      <Class name="org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslResponseWithNegotiatedCipherOption"/>
     </Or>
     <Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />
   </Match>

+ 105 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java

@@ -25,18 +25,28 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.crypto.key.KeyProviderFactory;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.net.BasicInetPeer;
+import org.apache.hadoop.hdfs.net.NioInetPeer;
+import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.hdfs.web.WebHdfsConstants;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -46,8 +56,10 @@ import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.net.Socket;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.nio.channels.SocketChannel;
 import java.text.SimpleDateFormat;
 import java.util.Collection;
 import java.util.Collections;
@@ -523,4 +535,97 @@ public class DFSUtilClient {
     }
     return keyProvider;
   }
+
+  public static Peer peerFromSocket(Socket socket)
+      throws IOException {
+    Peer peer = null;
+    boolean success = false;
+    try {
+      // TCP_NODELAY is crucial here because of bad interactions between
+      // Nagle's Algorithm and Delayed ACKs. With connection keepalive
+      // between the client and DN, the conversation looks like:
+      //   1. Client -> DN: Read block X
+      //   2. DN -> Client: data for block X
+      //   3. Client -> DN: Status OK (successful read)
+      //   4. Client -> DN: Read block Y
+      // The fact that step #3 and #4 are both in the client->DN direction
+      // triggers Nagling. If the DN is using delayed ACKs, this results
+      // in a delay of 40ms or more.
+      //
+      // TCP_NODELAY disables nagling and thus avoids this performance
+      // disaster.
+      socket.setTcpNoDelay(true);
+      SocketChannel channel = socket.getChannel();
+      if (channel == null) {
+        peer = new BasicInetPeer(socket);
+      } else {
+        peer = new NioInetPeer(socket);
+      }
+      success = true;
+      return peer;
+    } finally {
+      if (!success) {
+        if (peer != null) peer.close();
+        socket.close();
+      }
+    }
+  }
+
+  public static Peer peerFromSocketAndKey(
+        SaslDataTransferClient saslClient, Socket s,
+        DataEncryptionKeyFactory keyFactory,
+        Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
+        throws IOException {
+    Peer peer = null;
+    boolean success = false;
+    try {
+      peer = peerFromSocket(s);
+      peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
+      success = true;
+      return peer;
+    } finally {
+      if (!success) {
+        IOUtilsClient.cleanup(null, peer);
+      }
+    }
+  }
+
+  public static InetSocketAddress getNNAddress(String address) {
+    return NetUtils.createSocketAddr(address,
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
+  }
+
+  public static InetSocketAddress getNNAddress(Configuration conf) {
+    URI filesystemURI = FileSystem.getDefaultUri(conf);
+    return getNNAddress(filesystemURI);
+  }
+
+  /**
+   * @return address of file system
+   */
+  public static InetSocketAddress getNNAddress(URI filesystemURI) {
+    String authority = filesystemURI.getAuthority();
+    if (authority == null) {
+      throw new IllegalArgumentException(String.format(
+          "Invalid URI for NameNode address (check %s): %s has no authority.",
+          FileSystem.FS_DEFAULT_NAME_KEY, filesystemURI.toString()));
+    }
+    if (!HdfsConstants.HDFS_URI_SCHEME.equalsIgnoreCase(
+        filesystemURI.getScheme())) {
+      throw new IllegalArgumentException(String.format(
+          "Invalid URI for NameNode address (check %s): " +
+          "%s is not of scheme '%s'.", FileSystem.FS_DEFAULT_NAME_KEY,
+          filesystemURI.toString(), HdfsConstants.HDFS_URI_SCHEME));
+    }
+    return getNNAddress(authority);
+  }
+
+  public static URI getNNUri(InetSocketAddress namenode) {
+    int port = namenode.getPort();
+    String portString =
+        (port == HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) ?
+        "" : (":" + port);
+    return URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+        + namenode.getHostName() + portString);
+  }
 }

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java

@@ -126,6 +126,20 @@ public interface HdfsClientConfigKeys {
   long    DFS_DATANODE_READAHEAD_BYTES_DEFAULT = 4 * 1024 * 1024; // 4MB
   String  DFS_ENCRYPTION_KEY_PROVIDER_URI = "dfs.encryption.key.provider.uri";

+  String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY =
+      "dfs.encrypt.data.transfer.cipher.suites";
+
+  String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
+  String DFS_DATA_TRANSFER_PROTECTION_DEFAULT = "";
+  String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY =
+      "dfs.data.transfer.saslproperties.resolver.class";
+
+  String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY =
+      "dfs.encrypt.data.transfer.cipher.key.bitlength";
+  int    DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT = 128;
+
+  String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
+
   String REPLICA_ACCESSOR_BUILDER_CLASSES_KEY =
       PREFIX + "replica.accessor.builder.classes";


+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java

@@ -30,7 +30,7 @@ import org.apache.hadoop.net.unix.DomainSocket;
  * that has no associated Channel.
  *
  */
-class BasicInetPeer implements Peer {
+public class BasicInetPeer implements Peer {
   private final Socket socket;
   private final OutputStream out;
   private final InputStream in;

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java


+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java

@@ -31,7 +31,7 @@ import org.apache.hadoop.net.unix.DomainSocket;
  * Represents a peer that we communicate with by using non-blocking I/O 
  * on a Socket.
  */
-class NioInetPeer implements Peer {
+public class NioInetPeer implements Peer {
   private final Socket socket;

   /**
@@ -46,7 +46,7 @@ class NioInetPeer implements Peer {

   private final boolean isLocal;

-  NioInetPeer(Socket socket) throws IOException {
+  public NioInetPeer(Socket socket) throws IOException {
     this.socket = socket;
     this.in = new SocketInputStream(socket.getChannel(), 0);
     this.out = new SocketOutputStream(socket.getChannel(), 0);

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/net/Peer.java

@@ -57,8 +57,8 @@ public interface Peer extends Closeable {
    * Set the write timeout on this peer.
    *
    * Note: this is not honored for BasicInetPeer.
-   * See {@link BasicSocketPeer#setWriteTimeout} for details.
-   * 
+   * See {@link BasicInetPeer#setWriteTimeout} for details.
+   *
    * @param timeoutMs       The timeout in milliseconds.
    */
   public void setWriteTimeout(int timeoutMs) throws IOException;
@@ -76,13 +76,13 @@ public interface Peer extends Closeable {
   public void close() throws IOException;

   /**
-   * @return               A string representing the remote end of our 
+   * @return               A string representing the remote end of our
    *                       connection to the peer.
    */
   public String getRemoteAddressString();

   /**
-   * @return               A string representing the local end of our 
+   * @return               A string representing the local end of our
    *                       connection to the peer.
    */
   public String getLocalAddressString();

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java

@@ -149,4 +149,11 @@ public interface ClientDatanodeProtocol {
    */
   void triggerBlockReport(BlockReportOptions options)
     throws IOException;
+
+  /**
+   * Get current value of the balancer bandwidth in bytes per second.
+   *
+   * @return balancer bandwidth
+   */
+  long getBalancerBandwidth() throws IOException;
 }

+ 42 - 5
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -53,7 +53,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private String location = NetworkTopology.DEFAULT_RACK;
   private String softwareVersion;
   private List<String> dependentHostNames = new LinkedList<String>();
-
+  private String upgradeDomain;

   // Datanode administrative states
   public enum AdminStates {
@@ -95,6 +95,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.xceiverCount = from.getXceiverCount();
     this.location = from.getNetworkLocation();
     this.adminState = from.getAdminState();
+    this.upgradeDomain = from.getUpgradeDomain();
   }

   public DatanodeInfo(DatanodeID nodeID) {
@@ -120,12 +121,13 @@ public class DatanodeInfo extends DatanodeID implements Node {
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
       final long lastUpdate, final long lastUpdateMonotonic,
-      final int xceiverCount, final AdminStates adminState) {
+      final int xceiverCount, final AdminStates adminState,
+      final String upgradeDomain) {
     this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getDatanodeUuid(),
         nodeID.getXferPort(), nodeID.getInfoPort(), nodeID.getInfoSecurePort(),
         nodeID.getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed,
         cacheCapacity, cacheUsed, lastUpdate, lastUpdateMonotonic,
-        xceiverCount, location, adminState);
+        xceiverCount, location, adminState, upgradeDomain);
   }

   /** Constructor */
@@ -137,6 +139,22 @@ public class DatanodeInfo extends DatanodeID implements Node {
       final long lastUpdate, final long lastUpdateMonotonic,
       final int xceiverCount, final String networkLocation,
       final AdminStates adminState) {
+    this(ipAddr, hostName, datanodeUuid, xferPort, infoPort, infoSecurePort,
+        ipcPort, capacity, dfsUsed, remaining, blockPoolUsed, cacheCapacity,
+        cacheUsed, lastUpdate, lastUpdateMonotonic, xceiverCount,
+        networkLocation, adminState, null);
+  }
+
+  /** Constructor */
+  public DatanodeInfo(final String ipAddr, final String hostName,
+      final String datanodeUuid, final int xferPort, final int infoPort,
+      final int infoSecurePort, final int ipcPort,
+      final long capacity, final long dfsUsed, final long remaining,
+      final long blockPoolUsed, final long cacheCapacity, final long cacheUsed,
+      final long lastUpdate, final long lastUpdateMonotonic,
+      final int xceiverCount, final String networkLocation,
+      final AdminStates adminState,
+      final String upgradeDomain) {
     super(ipAddr, hostName, datanodeUuid, xferPort, infoPort,
             infoSecurePort, ipcPort);
     this.capacity = capacity;
@@ -150,6 +168,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.xceiverCount = xceiverCount;
     this.location = networkLocation;
     this.adminState = adminState;
+    this.upgradeDomain = upgradeDomain;
   }

   /** Network location name */
@@ -300,6 +319,16 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.location = NodeBase.normalize(location);
   }

+  /** Sets the upgrade domain */
+  public void setUpgradeDomain(String upgradeDomain) {
+    this.upgradeDomain = upgradeDomain;
+  }
+
+  /** upgrade domain */
+  public String getUpgradeDomain() {
+    return upgradeDomain;
+  }
+
   /** Add a hostname to a list of network dependencies */
   public void addDependentHostName(String hostname) {
     dependentHostNames.add(hostname);
@@ -341,6 +370,9 @@ public class DatanodeInfo extends DatanodeID implements Node {
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append("Rack: "+location+"\n");
     }
+    if (upgradeDomain != null) {
+      buffer.append("Upgrade domain: "+ upgradeDomain +"\n");
+    }
     buffer.append("Decommission Status : ");
     buffer.append("Decommission Status : ");
     if (isDecommissioned()) {
     if (isDecommissioned()) {
       buffer.append("Decommissioned\n");
       buffer.append("Decommissioned\n");
@@ -371,13 +403,18 @@ public class DatanodeInfo extends DatanodeID implements Node {
     long c = getCapacity();
     long r = getRemaining();
     long u = getDfsUsed();
+    float usedPercent = getDfsUsedPercent();
     long cc = getCacheCapacity();
     long cr = getCacheRemaining();
     long cu = getCacheUsed();
+    float cacheUsedPercent = getCacheUsedPercent();
     buffer.append(getName());
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append(" "+location);
     }
+    if (upgradeDomain != null) {
+      buffer.append(" " + upgradeDomain);
+    }
     if (isDecommissioned()) {
       buffer.append(" DD");
     } else if (isDecommissionInProgress()) {
@@ -387,11 +424,11 @@ public class DatanodeInfo extends DatanodeID implements Node {
     }
     buffer.append(" " + c + "(" + StringUtils.byteDesc(c)+")");
     buffer.append(" " + u + "(" + StringUtils.byteDesc(u)+")");
-    buffer.append(" " + percent2String(u/(double)c));
+    buffer.append(" " + percent2String(usedPercent));
     buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
     buffer.append(" " + r + "(" + StringUtils.byteDesc(r)+")");
     buffer.append(" " + cc + "(" + StringUtils.byteDesc(cc)+")");
     buffer.append(" " + cc + "(" + StringUtils.byteDesc(cc)+")");
     buffer.append(" " + cu + "(" + StringUtils.byteDesc(cu)+")");
     buffer.append(" " + cu + "(" + StringUtils.byteDesc(cu)+")");
-    buffer.append(" " + percent2String(cu/(double)cc));
+    buffer.append(" " + percent2String(cacheUsedPercent));
     buffer.append(" " + cr + "(" + StringUtils.byteDesc(cr)+")");
     buffer.append(" " + cr + "(" + StringUtils.byteDesc(cr)+")");
     buffer.append(" " + new Date(lastUpdate));
     buffer.append(" " + new Date(lastUpdate));
     return buffer.toString();
     return buffer.toString();

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java

@@ -34,12 +34,20 @@ public final class HdfsConstants {
    * URI Scheme for hdfs://namenode/ URIs.
    */
   public static final String HDFS_URI_SCHEME = "hdfs";
+
+  public static final byte MEMORY_STORAGE_POLICY_ID = 15;
   public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
+  public static final byte ALLSSD_STORAGE_POLICY_ID = 12;
   public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
+  public static final byte ONESSD_STORAGE_POLICY_ID = 10;
   public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
+  public static final byte HOT_STORAGE_POLICY_ID = 7;
   public static final String HOT_STORAGE_POLICY_NAME = "HOT";
+  public static final byte WARM_STORAGE_POLICY_ID = 5;
   public static final String WARM_STORAGE_POLICY_NAME = "WARM";
+  public static final byte COLD_STORAGE_POLICY_ID = 2;
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
+
   // TODO should be conf injected?
   public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
   /**

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsLocatedFileStatus.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/IOStreamPair.java


+ 0 - 31
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PipelineAck.java

@@ -23,15 +23,10 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;

 import com.google.common.collect.Lists;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OOB_TIMEOUT_DEFAULT;

 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PipelineAckProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
@@ -46,9 +41,6 @@ public class PipelineAck {
   public final static long UNKOWN_SEQNO = -2;
   final static int OOB_START = Status.OOB_RESTART_VALUE; // the first OOB type
   final static int OOB_END = Status.OOB_RESERVED3_VALUE; // the last OOB type
-  final static int NUM_OOB_TYPES = OOB_END - OOB_START + 1;
-  // place holder for timeout value of each OOB type
-  final static long[] OOB_TIMEOUT;

   public enum ECN {
     DISABLED(0),
@@ -99,16 +91,6 @@ public class PipelineAck {
     }
   }

-  static {
-    OOB_TIMEOUT = new long[NUM_OOB_TYPES];
-    HdfsConfiguration conf = new HdfsConfiguration();
-    String[] ele = conf.get(DFS_DATANODE_OOB_TIMEOUT_KEY,
-        DFS_DATANODE_OOB_TIMEOUT_DEFAULT).split(",");
-    for (int i = 0; i < NUM_OOB_TYPES; i++) {
-      OOB_TIMEOUT[i] = (i < ele.length) ? Long.parseLong(ele[i]) : 0;
-    }
-  }
-
   /** default constructor **/
   public PipelineAck() {
   }
@@ -216,19 +198,6 @@ public class PipelineAck {
     return null;
   }

-  /**
-   * Get the timeout to be used for transmitting the OOB type
-   * @return the timeout in milliseconds
-   */
-  public static long getOOBTimeout(Status status) throws IOException {
-    int index = status.getNumber() - OOB_START;
-    if (index >= 0 && index < NUM_OOB_TYPES) {
-      return OOB_TIMEOUT[index];
-    } 
-    // Not an OOB.
-    throw new IOException("Not an OOB status: " + status);
-  }
-
   /** Get the Restart OOB ack status */
   public static Status getRestartOOBStatus() {
     return Status.OOB_RESTART;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/TrustedChannelResolver.java

@@ -21,7 +21,7 @@ import java.net.InetAddress;

 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.util.ReflectionUtils;

 /**
@@ -45,7 +45,7 @@ public class TrustedChannelResolver implements Configurable {
   public static TrustedChannelResolver getInstance(Configuration conf) {
     Class<? extends TrustedChannelResolver> clazz =
       conf.getClass(
-          DFSConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
+          HdfsClientConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS,
           TrustedChannelResolver.class, TrustedChannelResolver.class);
     return ReflectionUtils.newInstance(clazz, conf);
   }

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataEncryptionKeyFactory.java


+ 10 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java

@@ -19,11 +19,11 @@ package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;

 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_RPC_PROTECTION;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_SASL_PROPS_RESOLVER_CLASS;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
 import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;

 import java.io.IOException;
@@ -50,7 +50,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyExceptio
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncryptorMessageProto.DataTransferEncryptorStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto;
-import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.security.SaslPropertiesResolver;
 import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
 import org.slf4j.Logger;
@@ -240,7 +240,7 @@ public final class DataTransferSaslUtil {
       List<CipherOptionProto> optionProtos = proto.getCipherOptionList();
       if (optionProtos != null) {
         for (CipherOptionProto optionProto : optionProtos) {
-          cipherOptions.add(PBHelper.convert(optionProto));
+          cipherOptions.add(PBHelperClient.convert(optionProto));
         }
       }
       return proto.getPayload().toByteArray();
@@ -309,7 +309,7 @@ public final class DataTransferSaslUtil {
       builder.setPayload(ByteString.copyFrom(payload));
     }
     if (option != null) {
-      builder.addCipherOption(PBHelper.convert(option));
+      builder.addCipherOption(PBHelperClient.convert(option));
     }

     DataTransferEncryptorMessageProto proto = builder.build();
@@ -392,7 +392,7 @@ public final class DataTransferSaslUtil {
       builder.setPayload(ByteString.copyFrom(payload));
     }
     if (options != null) {
-      builder.addAllCipherOption(PBHelper.convertCipherOptions(options));
+      builder.addAllCipherOption(PBHelperClient.convertCipherOptions(options));
     }

     DataTransferEncryptorMessageProto proto = builder.build();
@@ -419,7 +419,7 @@ public final class DataTransferSaslUtil {
       throw new IOException(proto.getMessage());
     } else {
       byte[] response = proto.getPayload().toByteArray();
-      List<CipherOption> options = PBHelper.convertCipherOptionProtos(
+      List<CipherOption> options = PBHelperClient.convertCipherOptionProtos(
           proto.getCipherOptionList());
       CipherOption option = null;
       if (options != null && !options.isEmpty()) {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java

@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;

-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
 import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;

 import java.io.DataInputStream;

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslParticipant.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslResponseWithNegotiatedCipherOption.java


+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java

@@ -41,6 +41,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto;
@@ -98,6 +100,9 @@ public class ClientDatanodeProtocolTranslatorPB implements
   private static final ListReconfigurablePropertiesRequestProto
       VOID_LIST_RECONFIGURABLE_PROPERTIES =
       ListReconfigurablePropertiesRequestProto.newBuilder().build();
+  private static final GetBalancerBandwidthRequestProto
+      VOID_GET_BALANCER_BANDWIDTH =
+      GetBalancerBandwidthRequestProto.newBuilder().build();

   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
       Configuration conf, int socketTimeout, boolean connectToDnViaHostname,
@@ -323,4 +328,16 @@ public class ClientDatanodeProtocolTranslatorPB implements
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public long getBalancerBandwidth() throws IOException {
+    GetBalancerBandwidthResponseProto response;
+    try {
+      response = rpcProxy.getBalancerBandwidth(NULL_CONTROLLER,
+          VOID_GET_BALANCER_BANDWIDTH);
+      return response.getBandwidth();
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }
 }
+ 2177 - 9
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java

@@ -17,44 +17,180 @@
  */
 package org.apache.hadoop.hdfs.protocolPB;

+import java.io.EOFException;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
+import com.google.common.primitives.Shorts;
 import com.google.protobuf.ByteString;
 import com.google.protobuf.CodedInputStream;
+
+import org.apache.hadoop.crypto.CipherOption;
+import org.apache.hadoop.crypto.CipherSuite;
+import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.fs.CacheFlag;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.CreateFlag;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.AclEntryType;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSUtilClient;
+import org.apache.hadoop.hdfs.inotify.Event;
+import org.apache.hadoop.hdfs.inotify.EventBatch;
+import org.apache.hadoop.hdfs.inotify.EventBatchList;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
+import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolStats;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo;
+import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
+import org.apache.hadoop.hdfs.protocol.RollingUpgradeStatus;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
+import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryScopeProto;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTypeProto;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
+import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveStatsProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheFlagProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolStatsProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeStorageReportProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeActionProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RollingUpgradeInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmIdProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
+import org.apache.hadoop.hdfs.protocol.proto.EncryptionZonesProtos.EncryptionZoneProto;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos;
+import org.apache.hadoop.hdfs.protocol.proto.ErasureCodingProtos.BlockECRecoveryInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.BlockStoragePolicyProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ContentSummaryProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CorruptFileBlocksProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CryptoProtocolVersionProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DataEncryptionKeyProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfoProto.AdminState;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeInfosProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeLocalInfoProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeStorageProto.StorageState;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DirectoryListingProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ExtendedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ErasureCodingPolicyProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsPermissionProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FsServerDefaultsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.FileType;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryListingProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshottableDirectoryStatusProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
+import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.ListXAttrsResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrProto.XAttrNamespaceProto;
+import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrSetFlagProto;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
+import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
+import org.apache.hadoop.io.EnumSetWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;

-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-import java.util.ArrayList;
-import java.util.List;
-
 /**
- * Utilities for converting protobuf classes to and from implementation classes
- * and other helper utilities to help in dealing with protobuf.
+ * Utilities for converting protobuf classes to and from hdfs-client side
+ * implementation classes and other helper utilities to help in dealing with
+ * protobuf.
  *
  * Note that when converting from an internal type to protobuf type, the
  * converter never return null for protobuf type. The check for internal type
  * being null must be done before calling the convert() method.
  */
 public class PBHelperClient {
+  private static final XAttr.NameSpace[] XATTR_NAMESPACE_VALUES =
+      XAttr.NameSpace.values();
+  private static final AclEntryType[] ACL_ENTRY_TYPE_VALUES =
+      AclEntryType.values();
+  private static final AclEntryScope[] ACL_ENTRY_SCOPE_VALUES =
+      AclEntryScope.values();
+  private static final FsAction[] FSACTION_VALUES =
+      FsAction.values();
+
   private PBHelperClient() {
     /** Hidden constructor */
   }
@@ -138,6 +274,9 @@ public class PBHelperClient {
     if (info.getNetworkLocation() != null) {
       builder.setLocation(info.getNetworkLocation());
     }
+    if (info.getUpgradeDomain() != null) {
+      builder.setUpgradeDomain(info.getUpgradeDomain());
+    }
     builder
       .setId(convert((DatanodeID) info))
       .setCapacity(info.getCapacity())
@@ -248,7 +387,7 @@ public class PBHelperClient {
     final List<StorageTypeProto> protos = new ArrayList<>(
       types.length);
     for (int i = startIdx; i < types.length; ++i) {
-      protos.add(PBHelperClient.convertStorageType(types[i]));
+      protos.add(convertStorageType(types[i]));
     }
     return protos;
   }
@@ -264,4 +403,2033 @@ public class PBHelperClient {
     assert size >= 0;
     return new ExactSizeInputStream(input, size);
   }
+
+  public static CipherOption convert(HdfsProtos.CipherOptionProto proto) {
+    if (proto != null) {
+      CipherSuite suite = null;
+      if (proto.getSuite() != null) {
+        suite = convert(proto.getSuite());
+      }
+      byte[] inKey = null;
+      if (proto.getInKey() != null) {
+        inKey = proto.getInKey().toByteArray();
+      }
+      byte[] inIv = null;
+      if (proto.getInIv() != null) {
+        inIv = proto.getInIv().toByteArray();
+      }
+      byte[] outKey = null;
+      if (proto.getOutKey() != null) {
+        outKey = proto.getOutKey().toByteArray();
+      }
+      byte[] outIv = null;
+      if (proto.getOutIv() != null) {
+        outIv = proto.getOutIv().toByteArray();
+      }
+      return new CipherOption(suite, inKey, inIv, outKey, outIv);
+    }
+    return null;
+  }
+
+  public static CipherSuite convert(HdfsProtos.CipherSuiteProto proto) {
+    switch (proto) {
+    case AES_CTR_NOPADDING:
+      return CipherSuite.AES_CTR_NOPADDING;
+    default:
+      // Set to UNKNOWN and stash the unknown enum value
+      CipherSuite suite = CipherSuite.UNKNOWN;
+      suite.setUnknownValue(proto.getNumber());
+      return suite;
+    }
+  }
+
+  public static HdfsProtos.CipherOptionProto convert(CipherOption option) {
+    if (option != null) {
+      HdfsProtos.CipherOptionProto.Builder builder = HdfsProtos.CipherOptionProto.
+          newBuilder();
+      if (option.getCipherSuite() != null) {
+        builder.setSuite(convert(option.getCipherSuite()));
+      }
+      if (option.getInKey() != null) {
+        builder.setInKey(ByteString.copyFrom(option.getInKey()));
+      }
+      if (option.getInIv() != null) {
+        builder.setInIv(ByteString.copyFrom(option.getInIv()));
+      }
+      if (option.getOutKey() != null) {
+        builder.setOutKey(ByteString.copyFrom(option.getOutKey()));
+      }
+      if (option.getOutIv() != null) {
+        builder.setOutIv(ByteString.copyFrom(option.getOutIv()));
+      }
+      return builder.build();
+    }
+    return null;
+  }
+
+  public static HdfsProtos.CipherSuiteProto convert(CipherSuite suite) {
+    switch (suite) {
+    case UNKNOWN:
+      return HdfsProtos.CipherSuiteProto.UNKNOWN;
+    case AES_CTR_NOPADDING:
+      return HdfsProtos.CipherSuiteProto.AES_CTR_NOPADDING;
+    default:
+      return null;
+    }
+  }
+
+  public static List<HdfsProtos.CipherOptionProto> convertCipherOptions(
+      List<CipherOption> options) {
+    if (options != null) {
+      List<HdfsProtos.CipherOptionProto> protos =
+          Lists.newArrayListWithCapacity(options.size());
+      for (CipherOption option : options) {
+        protos.add(convert(option));
+      }
+      return protos;
+    }
+    return null;
+  }
+
+  public static List<CipherOption> convertCipherOptionProtos(
+      List<HdfsProtos.CipherOptionProto> protos) {
+    if (protos != null) {
+      List<CipherOption> options =
+          Lists.newArrayListWithCapacity(protos.size());
+      for (HdfsProtos.CipherOptionProto proto : protos) {
+        options.add(convert(proto));
+      }
+      return options;
+    }
+    return null;
+  }
+
+  public static LocatedBlock convertLocatedBlockProto(LocatedBlockProto proto) {
+    if (proto == null) return null;
+    List<DatanodeInfoProto> locs = proto.getLocsList();
+    DatanodeInfo[] targets = new DatanodeInfo[locs.size()];
+    for (int i = 0; i < locs.size(); i++) {
+      targets[i] = convert(locs.get(i));
+    }
+
+    final StorageType[] storageTypes = convertStorageTypes(
+        proto.getStorageTypesList(), locs.size());
+
+    final int storageIDsCount = proto.getStorageIDsCount();
+    final String[] storageIDs;
+    if (storageIDsCount == 0) {
+      storageIDs = null;
+    } else {
+      Preconditions.checkState(storageIDsCount == locs.size());
+      storageIDs = proto.getStorageIDsList().toArray(new String[storageIDsCount]);
+    }
+
+    int[] indices = null;
+    final int indexCount = proto.getBlockIndexCount();
+    if (indexCount > 0) {
+      indices = new int[indexCount];
+      for (int i = 0; i < indexCount; i++) {
+        indices[i] = proto.getBlockIndex(i);
+      }
+    }
+
+    // Set values from the isCached list, re-using references from loc
+    List<DatanodeInfo> cachedLocs = new ArrayList<DatanodeInfo>(locs.size());
+    List<Boolean> isCachedList = proto.getIsCachedList();
+    for (int i=0; i<isCachedList.size(); i++) {
+      if (isCachedList.get(i)) {
+        cachedLocs.add(targets[i]);
+      }
+    }
+
+    final LocatedBlock lb;
+    if (indices == null) {
+      lb = new LocatedBlock(PBHelperClient.convert(proto.getB()), targets,
+          storageIDs, storageTypes, proto.getOffset(), proto.getCorrupt(),
+          cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
+    } else {
+      lb = new LocatedStripedBlock(PBHelperClient.convert(proto.getB()), targets,
+          storageIDs, storageTypes, indices, proto.getOffset(),
+          proto.getCorrupt(),
+          cachedLocs.toArray(new DatanodeInfo[cachedLocs.size()]));
+      List<TokenProto> tokenProtos = proto.getBlockTokensList();
+      Token<BlockTokenIdentifier>[] blockTokens = new Token[indices.length];
+      for (int i = 0; i < indices.length; i++) {
+        blockTokens[i] = convert(tokenProtos.get(i));
+      }
+      ((LocatedStripedBlock) lb).setBlockTokens(blockTokens);
+    }
+    lb.setBlockToken(convert(proto.getBlockToken()));
+
+    return lb;
+  }
+
+  static public DatanodeInfo convert(DatanodeInfoProto di) {
+    if (di == null) return null;
+    return new DatanodeInfo(
+        convert(di.getId()),
+        di.hasLocation() ? di.getLocation() : null,
+        di.getCapacity(),  di.getDfsUsed(),  di.getRemaining(),
+        di.getBlockPoolUsed(), di.getCacheCapacity(), di.getCacheUsed(),
+        di.getLastUpdate(), di.getLastUpdateMonotonic(),
+        di.getXceiverCount(), convert(di.getAdminState()),
+        di.hasUpgradeDomain() ? di.getUpgradeDomain() : null);
+  }
+
+  public static StorageType[] convertStorageTypes(
+      List<StorageTypeProto> storageTypesList, int expectedSize) {
+    final StorageType[] storageTypes = new StorageType[expectedSize];
+    if (storageTypesList.size() != expectedSize) {
+      // missing storage types
+      Preconditions.checkState(storageTypesList.isEmpty());
+      Arrays.fill(storageTypes, StorageType.DEFAULT);
+    } else {
+      for (int i = 0; i < storageTypes.length; ++i) {
+        storageTypes[i] = convertStorageType(storageTypesList.get(i));
+      }
+    }
+    return storageTypes;
+  }
+
+  public static Token<BlockTokenIdentifier> convert(
+      TokenProto blockToken) {
+    return new Token<>(blockToken.getIdentifier()
+        .toByteArray(), blockToken.getPassword().toByteArray(), new Text(
+        blockToken.getKind()), new Text(blockToken.getService()));
+  }
+
+  // DatanodeId
+  public static DatanodeID convert(DatanodeIDProto dn) {
+    return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getDatanodeUuid(),
+        dn.getXferPort(), dn.getInfoPort(), dn.hasInfoSecurePort() ? dn
+        .getInfoSecurePort() : 0, dn.getIpcPort());
+  }
+
+  public static AdminStates convert(AdminState adminState) {
+    switch(adminState) {
+    case DECOMMISSION_INPROGRESS:
+      return AdminStates.DECOMMISSION_INPROGRESS;
+    case DECOMMISSIONED:
+      return AdminStates.DECOMMISSIONED;
+    case NORMAL:
+    default:
+      return AdminStates.NORMAL;
+    }
+  }
+
+  // LocatedBlocks
+  public static LocatedBlocks convert(LocatedBlocksProto lb) {
+    return new LocatedBlocks(
+        lb.getFileLength(), lb.getUnderConstruction(),
+        convertLocatedBlocks(lb.getBlocksList()),
+        lb.hasLastBlock() ?
+            convertLocatedBlockProto(lb.getLastBlock()) : null,
+        lb.getIsLastBlockComplete(),
+        lb.hasFileEncryptionInfo() ? convert(lb.getFileEncryptionInfo()) : null,
+        lb.hasEcPolicy() ? convertErasureCodingPolicy(lb.getEcPolicy()) : null);
+  }
+
+  public static BlockStoragePolicy[] convertStoragePolicies(
+      List<BlockStoragePolicyProto> policyProtos) {
+    if (policyProtos == null || policyProtos.size() == 0) {
+      return new BlockStoragePolicy[0];
+    }
+    BlockStoragePolicy[] policies = new BlockStoragePolicy[policyProtos.size()];
+    int i = 0;
+    for (BlockStoragePolicyProto proto : policyProtos) {
+      policies[i++] = convert(proto);
+    }
+    return policies;
+  }
+
+  public static EventBatchList convert(GetEditsFromTxidResponseProto resp) throws
+    IOException {
+    final InotifyProtos.EventsListProto list = resp.getEventsList();
+    final long firstTxid = list.getFirstTxid();
+    final long lastTxid = list.getLastTxid();
+
+    List<EventBatch> batches = Lists.newArrayList();
+    if (list.getEventsList().size() > 0) {
+      throw new IOException("Can't handle old inotify server response.");
+    }
+    for (InotifyProtos.EventBatchProto bp : list.getBatchList()) {
+      long txid = bp.getTxid();
+      if ((txid != -1) && ((txid < firstTxid) || (txid > lastTxid))) {
+        throw new IOException("Error converting TxidResponseProto: got a " +
+            "transaction id " + txid + " that was outside the range of [" +
+            firstTxid + ", " + lastTxid + "].");
+      }
+      List<Event> events = Lists.newArrayList();
+      for (InotifyProtos.EventProto p : bp.getEventsList()) {
+        switch (p.getType()) {
+          case EVENT_CLOSE:
+            InotifyProtos.CloseEventProto close =
+                InotifyProtos.CloseEventProto.parseFrom(p.getContents());
+            events.add(new Event.CloseEvent(close.getPath(),
+                close.getFileSize(), close.getTimestamp()));
+            break;
+          case EVENT_CREATE:
+            InotifyProtos.CreateEventProto create =
+                InotifyProtos.CreateEventProto.parseFrom(p.getContents());
+            events.add(new Event.CreateEvent.Builder()
+                .iNodeType(createTypeConvert(create.getType()))
+                .path(create.getPath())
+                .ctime(create.getCtime())
+                .ownerName(create.getOwnerName())
+                .groupName(create.getGroupName())
+                .perms(convert(create.getPerms()))
+                .replication(create.getReplication())
+                .symlinkTarget(create.getSymlinkTarget().isEmpty() ? null :
+                    create.getSymlinkTarget())
+                .defaultBlockSize(create.getDefaultBlockSize())
+                .overwrite(create.getOverwrite()).build());
+            break;
+          case EVENT_METADATA:
+            InotifyProtos.MetadataUpdateEventProto meta =
+                InotifyProtos.MetadataUpdateEventProto.parseFrom(p.getContents());
+            events.add(new Event.MetadataUpdateEvent.Builder()
+                .path(meta.getPath())
+                .metadataType(metadataUpdateTypeConvert(meta.getType()))
+                .mtime(meta.getMtime())
+                .atime(meta.getAtime())
+                .replication(meta.getReplication())
+                .ownerName(
+                    meta.getOwnerName().isEmpty() ? null : meta.getOwnerName())
+                .groupName(
+                    meta.getGroupName().isEmpty() ? null : meta.getGroupName())
+                .perms(meta.hasPerms() ? convert(meta.getPerms()) : null)
+                .acls(meta.getAclsList().isEmpty() ? null : convertAclEntry(
+                    meta.getAclsList()))
+                .xAttrs(meta.getXAttrsList().isEmpty() ? null : convertXAttrs(
+                    meta.getXAttrsList()))
+                .xAttrsRemoved(meta.getXAttrsRemoved())
+                .build());
+            break;
+          case EVENT_RENAME:
+            InotifyProtos.RenameEventProto rename =
+                InotifyProtos.RenameEventProto.parseFrom(p.getContents());
+            events.add(new Event.RenameEvent.Builder()
+                  .srcPath(rename.getSrcPath())
+                  .dstPath(rename.getDestPath())
+                  .timestamp(rename.getTimestamp())
+                  .build());
+            break;
+          case EVENT_APPEND:
+            InotifyProtos.AppendEventProto append =
+                InotifyProtos.AppendEventProto.parseFrom(p.getContents());
+            events.add(new Event.AppendEvent.Builder().path(append.getPath())
+                .newBlock(append.hasNewBlock() && append.getNewBlock())
+                .build());
+            break;
+          case EVENT_UNLINK:
+            InotifyProtos.UnlinkEventProto unlink =
+                InotifyProtos.UnlinkEventProto.parseFrom(p.getContents());
+            events.add(new Event.UnlinkEvent.Builder()
+                  .path(unlink.getPath())
+                  .timestamp(unlink.getTimestamp())
+                  .build());
+            break;
+          case EVENT_TRUNCATE:
+            InotifyProtos.TruncateEventProto truncate =
+                InotifyProtos.TruncateEventProto.parseFrom(p.getContents());
+            events.add(new Event.TruncateEvent(truncate.getPath(),
+                truncate.getFileSize(), truncate.getTimestamp()));
+            break;
+          default:
+            throw new RuntimeException("Unexpected inotify event type: " +
+                p.getType());
+        }
+      }
+      batches.add(new EventBatch(txid, events.toArray(new Event[0])));
+    }
+    return new EventBatchList(batches, resp.getEventsList().getFirstTxid(),
+        resp.getEventsList().getLastTxid(), resp.getEventsList().getSyncTxid());
+  }
+
+  // Located Block Arrays and Lists
+  public static LocatedBlockProto[] convertLocatedBlocks(LocatedBlock[] lb) {
+    if (lb == null) return null;
+    return convertLocatedBlocks2(Arrays.asList(lb))
+        .toArray(new LocatedBlockProto[lb.length]);
+  }
+
+  public static LocatedBlock[] convertLocatedBlocks(LocatedBlockProto[] lb) {
+    if (lb == null) return null;
+    return convertLocatedBlocks(Arrays.asList(lb))
+        .toArray(new LocatedBlock[lb.length]);
+  }
+
+  public static List<LocatedBlock> convertLocatedBlocks(
+      List<LocatedBlockProto> lb) {
+    if (lb == null) return null;
+    final int len = lb.size();
+    List<LocatedBlock> result = new ArrayList<>(len);
+    for (LocatedBlockProto aLb : lb) {
+      result.add(convertLocatedBlockProto(aLb));
+    }
+    return result;
+  }
+
+  public static List<LocatedBlockProto> convertLocatedBlocks2(
+      List<LocatedBlock> lb) {
+    if (lb == null) return null;
+    final int len = lb.size();
+    List<LocatedBlockProto> result = new ArrayList<>(len);
+    for (LocatedBlock aLb : lb) {
+      result.add(convertLocatedBlock(aLb));
+    }
+    return result;
+  }
+
+  public static LocatedBlockProto convertLocatedBlock(LocatedBlock b) {
+    if (b == null) return null;
+    Builder builder = LocatedBlockProto.newBuilder();
+    DatanodeInfo[] locs = b.getLocations();
+    List<DatanodeInfo> cachedLocs =
+        Lists.newLinkedList(Arrays.asList(b.getCachedLocations()));
+    for (int i = 0; i < locs.length; i++) {
+      DatanodeInfo loc = locs[i];
+      builder.addLocs(i, PBHelperClient.convert(loc));
+      boolean locIsCached = cachedLocs.contains(loc);
+      builder.addIsCached(locIsCached);
+      if (locIsCached) {
+        cachedLocs.remove(loc);
+      }
+    }
+    Preconditions.checkArgument(cachedLocs.size() == 0,
+        "Found additional cached replica locations that are not in the set of"
+            + " storage-backed locations!");
+
+    StorageType[] storageTypes = b.getStorageTypes();
+    if (storageTypes != null) {
+      for (StorageType storageType : storageTypes) {
+        builder.addStorageTypes(convertStorageType(storageType));
+      }
+    }
+    final String[] storageIDs = b.getStorageIDs();
+    if (storageIDs != null) {
+      builder.addAllStorageIDs(Arrays.asList(storageIDs));
+    }
+    if (b instanceof LocatedStripedBlock) {
+      LocatedStripedBlock sb = (LocatedStripedBlock) b;
+      int[] indices = sb.getBlockIndices();
+      Token<BlockTokenIdentifier>[] blockTokens = sb.getBlockTokens();
+      for (int i = 0; i < indices.length; i++) {
+        builder.addBlockIndex(indices[i]);
+        builder.addBlockTokens(PBHelperClient.convert(blockTokens[i]));
+      }
+    }
+
+    return builder.setB(PBHelperClient.convert(b.getBlock()))
+        .setBlockToken(PBHelperClient.convert(b.getBlockToken()))
+        .setCorrupt(b.isCorrupt()).setOffset(b.getStartOffset()).build();
+  }
+
+  public static BlockStoragePolicy convert(BlockStoragePolicyProto proto) {
+    List<StorageTypeProto> cList = proto.getCreationPolicy()
+        .getStorageTypesList();
+    StorageType[] creationTypes = convertStorageTypes(cList, cList.size());
+    List<StorageTypeProto> cfList = proto.hasCreationFallbackPolicy() ? proto
+        .getCreationFallbackPolicy().getStorageTypesList() : null;
+    StorageType[] creationFallbackTypes = cfList == null ? StorageType
+        .EMPTY_ARRAY : convertStorageTypes(cfList, cfList.size());
+    List<StorageTypeProto> rfList = proto.hasReplicationFallbackPolicy() ?
+        proto.getReplicationFallbackPolicy().getStorageTypesList() : null;
+    StorageType[] replicationFallbackTypes = rfList == null ? StorageType
+        .EMPTY_ARRAY : convertStorageTypes(rfList, rfList.size());
+    return new BlockStoragePolicy((byte) proto.getPolicyId(), proto.getName(),
+        creationTypes, creationFallbackTypes, replicationFallbackTypes);
+  }
+
+  public static FsActionProto convert(FsAction v) {
+    return FsActionProto.valueOf(v != null ? v.ordinal() : 0);
+  }
+
+  public static XAttrProto convertXAttrProto(XAttr a) {
+    XAttrProto.Builder builder = XAttrProto.newBuilder();
+    builder.setNamespace(convert(a.getNameSpace()));
+    if (a.getName() != null) {
+      builder.setName(a.getName());
+    }
+    if (a.getValue() != null) {
+      builder.setValue(getByteString(a.getValue()));
+    }
+    return builder.build();
+  }
+
+  public static List<XAttr> convert(ListXAttrsResponseProto a) {
+    final List<XAttrProto> xAttrs = a.getXAttrsList();
+    return convertXAttrs(xAttrs);
+  }
+
+  public static List<XAttr> convert(GetXAttrsResponseProto a) {
+    List<XAttrProto> xAttrs = a.getXAttrsList();
+    return convertXAttrs(xAttrs);
+  }
+
+  public static List<XAttr> convertXAttrs(List<XAttrProto> xAttrSpec) {
+    ArrayList<XAttr> xAttrs = Lists.newArrayListWithCapacity(xAttrSpec.size());
+    for (XAttrProto a : xAttrSpec) {
+      XAttr.Builder builder = new XAttr.Builder();
+      builder.setNameSpace(convert(a.getNamespace()));
+      if (a.hasName()) {
+        builder.setName(a.getName());
+      }
+      if (a.hasValue()) {
+        builder.setValue(a.getValue().toByteArray());
+      }
+      xAttrs.add(builder.build());
+    }
+    return xAttrs;
+  }
+
+  static XAttrNamespaceProto convert(XAttr.NameSpace v) {
+    return XAttrNamespaceProto.valueOf(v.ordinal());
+  }
+
+  static XAttr.NameSpace convert(XAttrNamespaceProto v) {
+    return castEnum(v, XATTR_NAMESPACE_VALUES);
+  }
+
+  static <T extends Enum<T>, U extends Enum<U>> U castEnum(T from, U[] to) {
+    return to[from.ordinal()];
+  }
+
+  static InotifyProtos.MetadataUpdateType metadataUpdateTypeConvert(
+      Event.MetadataUpdateEvent.MetadataType type) {
+    switch (type) {
+    case TIMES:
+      return InotifyProtos.MetadataUpdateType.META_TYPE_TIMES;
+    case REPLICATION:
+      return InotifyProtos.MetadataUpdateType.META_TYPE_REPLICATION;
+    case OWNER:
+      return InotifyProtos.MetadataUpdateType.META_TYPE_OWNER;
+    case PERMS:
+      return InotifyProtos.MetadataUpdateType.META_TYPE_PERMS;
+    case ACLS:
+      return InotifyProtos.MetadataUpdateType.META_TYPE_ACLS;
+    case XATTRS:
+      return InotifyProtos.MetadataUpdateType.META_TYPE_XATTRS;
+    default:
+      return null;
+    }
+  }
+
+  private static Event.MetadataUpdateEvent.MetadataType metadataUpdateTypeConvert(
+      InotifyProtos.MetadataUpdateType type) {
+    switch (type) {
+    case META_TYPE_TIMES:
+      return Event.MetadataUpdateEvent.MetadataType.TIMES;
+    case META_TYPE_REPLICATION:
+      return Event.MetadataUpdateEvent.MetadataType.REPLICATION;
+    case META_TYPE_OWNER:
+      return Event.MetadataUpdateEvent.MetadataType.OWNER;
+    case META_TYPE_PERMS:
+      return Event.MetadataUpdateEvent.MetadataType.PERMS;
+    case META_TYPE_ACLS:
+      return Event.MetadataUpdateEvent.MetadataType.ACLS;
+    case META_TYPE_XATTRS:
+      return Event.MetadataUpdateEvent.MetadataType.XATTRS;
+    default:
+      return null;
+    }
+  }
+
+  static InotifyProtos.INodeType createTypeConvert(Event.CreateEvent.INodeType
+                                                       type) {
+    switch (type) {
+    case DIRECTORY:
+      return InotifyProtos.INodeType.I_TYPE_DIRECTORY;
+    case FILE:
+      return InotifyProtos.INodeType.I_TYPE_FILE;
+    case SYMLINK:
+      return InotifyProtos.INodeType.I_TYPE_SYMLINK;
+    default:
+      return null;
+    }
+  }
+
+  public static List<LocatedBlock> convertLocatedBlock(
+      List<LocatedBlockProto> lb) {
+    if (lb == null) return null;
+    final int len = lb.size();
+    List<LocatedBlock> result = new ArrayList<>(len);
+    for (int i = 0; i < len; ++i) {
+      result.add(convertLocatedBlockProto(lb.get(i)));
+    }
+    return result;
+  }
+
+  public static List<AclEntry> convertAclEntry(List<AclEntryProto> aclSpec) {
+    ArrayList<AclEntry> r = Lists.newArrayListWithCapacity(aclSpec.size());
+    for (AclEntryProto e : aclSpec) {
+      AclEntry.Builder builder = new AclEntry.Builder();
+      builder.setType(convert(e.getType()));
+      builder.setScope(convert(e.getScope()));
+      builder.setPermission(convert(e.getPermissions()));
+      if (e.hasName()) {
+        builder.setName(e.getName());
+      }
+      r.add(builder.build());
+    }
+    return r;
+  }
+
+  static AclEntryScopeProto convert(AclEntryScope v) {
+    return AclEntryScopeProto.valueOf(v.ordinal());
+  }
+
+  private static AclEntryScope convert(AclEntryScopeProto v) {
+    return castEnum(v, ACL_ENTRY_SCOPE_VALUES);
+  }
+
+  static AclEntryTypeProto convert(AclEntryType e) {
+    return AclEntryTypeProto.valueOf(e.ordinal());
+  }
+
+  private static AclEntryType convert(AclEntryTypeProto v) {
+    return castEnum(v, ACL_ENTRY_TYPE_VALUES);
+  }
+
+  public static FsAction convert(FsActionProto v) {
+    return castEnum(v, FSACTION_VALUES);
+  }
+
+  public static FsPermission convert(FsPermissionProto p) {
+    return new FsPermissionExtension((short)p.getPerm());
+  }
+
+  private static Event.CreateEvent.INodeType createTypeConvert(
+      InotifyProtos.INodeType type) {
+    switch (type) {
+    case I_TYPE_DIRECTORY:
+      return Event.CreateEvent.INodeType.DIRECTORY;
+    case I_TYPE_FILE:
+      return Event.CreateEvent.INodeType.FILE;
+    case I_TYPE_SYMLINK:
+      return Event.CreateEvent.INodeType.SYMLINK;
+    default:
+      return null;
+    }
+  }
+
+  public static HdfsProtos.FileEncryptionInfoProto convert(
+      FileEncryptionInfo info) {
+    if (info == null) {
+      return null;
+    }
+    return HdfsProtos.FileEncryptionInfoProto.newBuilder()
+        .setSuite(convert(info.getCipherSuite()))
+        .setCryptoProtocolVersion(convert(info.getCryptoProtocolVersion()))
+        .setKey(getByteString(info.getEncryptedDataEncryptionKey()))
+        .setIv(getByteString(info.getIV()))
+        .setEzKeyVersionName(info.getEzKeyVersionName())
+        .setKeyName(info.getKeyName())
+        .build();
+  }
+
+  public static CryptoProtocolVersionProto convert(CryptoProtocolVersion
+      version) {
+    switch(version) {
+    case UNKNOWN:
+      return CryptoProtocolVersionProto.UNKNOWN_PROTOCOL_VERSION;
+    case ENCRYPTION_ZONES:
+      return CryptoProtocolVersionProto.ENCRYPTION_ZONES;
+    default:
+      return null;
+    }
+  }
+
+  public static FileEncryptionInfo convert(
+      HdfsProtos.FileEncryptionInfoProto proto) {
+    if (proto == null) {
+      return null;
+    }
+    CipherSuite suite = convert(proto.getSuite());
+    CryptoProtocolVersion version = convert(proto.getCryptoProtocolVersion());
+    byte[] key = proto.getKey().toByteArray();
+    byte[] iv = proto.getIv().toByteArray();
+    String ezKeyVersionName = proto.getEzKeyVersionName();
+    String keyName = proto.getKeyName();
+    return new FileEncryptionInfo(suite, version, key, iv, keyName,
+        ezKeyVersionName);
+  }
+
+  public static CryptoProtocolVersion convert(CryptoProtocolVersionProto
+      proto) {
+    switch(proto) {
+    case ENCRYPTION_ZONES:
+      return CryptoProtocolVersion.ENCRYPTION_ZONES;
+    default:
+      // Set to UNKNOWN and stash the unknown enum value
+      CryptoProtocolVersion version = CryptoProtocolVersion.UNKNOWN;
+      version.setUnknownValue(proto.getNumber());
+      return version;
+    }
+  }
+
+  public static List<XAttrProto> convertXAttrProto(
+      List<XAttr> xAttrSpec) {
+    if (xAttrSpec == null) {
+      return Lists.newArrayListWithCapacity(0);
+    }
+    ArrayList<XAttrProto> xAttrs = Lists.newArrayListWithCapacity(
+        xAttrSpec.size());
+    for (XAttr a : xAttrSpec) {
+      XAttrProto.Builder builder = XAttrProto.newBuilder();
+      builder.setNamespace(convert(a.getNameSpace()));
+      if (a.getName() != null) {
+        builder.setName(a.getName());
+      }
+      if (a.getValue() != null) {
+        builder.setValue(getByteString(a.getValue()));
+      }
+      xAttrs.add(builder.build());
+    }
+    return xAttrs;
+  }
+
+  /**
+   * The flag field in PB is a bitmask whose values are the same as the
+   * enum values of XAttrSetFlag.
+   */
+  public static int convert(EnumSet<XAttrSetFlag> flag) {
+    int value = 0;
+    if (flag.contains(XAttrSetFlag.CREATE)) {
+      value |= XAttrSetFlagProto.XATTR_CREATE.getNumber();
+    }
+    if (flag.contains(XAttrSetFlag.REPLACE)) {
+      value |= XAttrSetFlagProto.XATTR_REPLACE.getNumber();
+    }
+    return value;
+  }
+
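For reference, a minimal sketch of packing and unpacking the bitmask produced by convert(EnumSet&lt;XAttrSetFlag&gt;). It assumes the hadoop-hdfs-client artifact is on the classpath, the import path for the generated XAttrSetFlagProto is a best guess, and the hand-rolled decode is only illustrative since no reverse helper appears in this hunk.

import java.util.EnumSet;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.XAttrSetFlagProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

public class XAttrFlagMaskDemo {
  public static void main(String[] args) {
    // Pack the flag set into the PB bitmask via the helper above.
    int mask = PBHelperClient.convert(
        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
    // Unpack by hand, checking the same proto bit values the helper uses.
    EnumSet<XAttrSetFlag> decoded = EnumSet.noneOf(XAttrSetFlag.class);
    if ((mask & XAttrSetFlagProto.XATTR_CREATE.getNumber()) != 0) {
      decoded.add(XAttrSetFlag.CREATE);
    }
    if ((mask & XAttrSetFlagProto.XATTR_REPLACE.getNumber()) != 0) {
      decoded.add(XAttrSetFlag.REPLACE);
    }
    System.out.println(decoded);  // [CREATE, REPLACE]
  }
}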
+  public static EncryptionZone convert(EncryptionZoneProto proto) {
+    return new EncryptionZone(proto.getId(), proto.getPath(),
+        convert(proto.getSuite()), convert(proto.getCryptoProtocolVersion()),
+        proto.getKeyName());
+  }
+
+  public static AclStatus convert(GetAclStatusResponseProto e) {
+    AclStatusProto r = e.getResult();
+    AclStatus.Builder builder = new AclStatus.Builder();
+    builder.owner(r.getOwner()).group(r.getGroup()).stickyBit(r.getSticky())
+        .addEntries(convertAclEntry(r.getEntriesList()));
+    if (r.hasPermission()) {
+      builder.setPermission(convert(r.getPermission()));
+    }
+    return builder.build();
+  }
+
+  public static List<AclEntryProto> convertAclEntryProto(
+      List<AclEntry> aclSpec) {
+    ArrayList<AclEntryProto> r = Lists.newArrayListWithCapacity(aclSpec.size());
+    for (AclEntry e : aclSpec) {
+      AclEntryProto.Builder builder = AclEntryProto.newBuilder();
+      builder.setType(convert(e.getType()));
+      builder.setScope(convert(e.getScope()));
+      builder.setPermissions(convert(e.getPermission()));
+      if (e.getName() != null) {
+        builder.setName(e.getName());
+      }
+      r.add(builder.build());
+    }
+    return r;
+  }
+
+  public static CachePoolEntry convert(CachePoolEntryProto proto) {
+    CachePoolInfo info = convert(proto.getInfo());
+    CachePoolStats stats = convert(proto.getStats());
+    return new CachePoolEntry(info, stats);
+  }
+
+  public static CachePoolInfo convert(CachePoolInfoProto proto) {
+    // Pool name is a required field, the rest are optional
+    String poolName = Preconditions.checkNotNull(proto.getPoolName());
+    CachePoolInfo info = new CachePoolInfo(poolName);
+    if (proto.hasOwnerName()) {
+      info.setOwnerName(proto.getOwnerName());
+    }
+    if (proto.hasGroupName()) {
+      info.setGroupName(proto.getGroupName());
+    }
+    if (proto.hasMode()) {
+      info.setMode(new FsPermission((short)proto.getMode()));
+    }
+    if (proto.hasLimit()) {
+      info.setLimit(proto.getLimit());
+    }
+    if (proto.hasMaxRelativeExpiry()) {
+      info.setMaxRelativeExpiryMs(proto.getMaxRelativeExpiry());
+    }
+    return info;
+  }
+
+  public static CachePoolStats convert(CachePoolStatsProto proto) {
+    CachePoolStats.Builder builder = new CachePoolStats.Builder();
+    builder.setBytesNeeded(proto.getBytesNeeded());
+    builder.setBytesCached(proto.getBytesCached());
+    builder.setBytesOverlimit(proto.getBytesOverlimit());
+    builder.setFilesNeeded(proto.getFilesNeeded());
+    builder.setFilesCached(proto.getFilesCached());
+    return builder.build();
+  }
+
+  public static CachePoolInfoProto convert(CachePoolInfo info) {
+    CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder();
+    builder.setPoolName(info.getPoolName());
+    if (info.getOwnerName() != null) {
+      builder.setOwnerName(info.getOwnerName());
+    }
+    if (info.getGroupName() != null) {
+      builder.setGroupName(info.getGroupName());
+    }
+    if (info.getMode() != null) {
+      builder.setMode(info.getMode().toShort());
+    }
+    if (info.getLimit() != null) {
+      builder.setLimit(info.getLimit());
+    }
+    if (info.getMaxRelativeExpiryMs() != null) {
+      builder.setMaxRelativeExpiry(info.getMaxRelativeExpiryMs());
+    }
+    return builder.build();
+  }
+
+  public static CacheDirectiveInfoProto convert(CacheDirectiveInfo info) {
+    CacheDirectiveInfoProto.Builder builder =
+        CacheDirectiveInfoProto.newBuilder();
+    if (info.getId() != null) {
+      builder.setId(info.getId());
+    }
+    if (info.getPath() != null) {
+      builder.setPath(info.getPath().toUri().getPath());
+    }
+    if (info.getReplication() != null) {
+      builder.setReplication(info.getReplication());
+    }
+    if (info.getPool() != null) {
+      builder.setPool(info.getPool());
+    }
+    if (info.getExpiration() != null) {
+      builder.setExpiration(convert(info.getExpiration()));
+    }
+    return builder.build();
+  }
+
+  public static CacheDirectiveInfoExpirationProto convert(
+      CacheDirectiveInfo.Expiration expiration) {
+    return CacheDirectiveInfoExpirationProto.newBuilder()
+        .setIsRelative(expiration.isRelative())
+        .setMillis(expiration.getMillis())
+        .build();
+  }
+
+  public static CacheDirectiveEntry convert(CacheDirectiveEntryProto proto) {
+    CacheDirectiveInfo info = convert(proto.getInfo());
+    CacheDirectiveStats stats = convert(proto.getStats());
+    return new CacheDirectiveEntry(info, stats);
+  }
+
+  public static CacheDirectiveStats convert(CacheDirectiveStatsProto proto) {
+    CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
+    builder.setBytesNeeded(proto.getBytesNeeded());
+    builder.setBytesCached(proto.getBytesCached());
+    builder.setFilesNeeded(proto.getFilesNeeded());
+    builder.setFilesCached(proto.getFilesCached());
+    builder.setHasExpired(proto.getHasExpired());
+    return builder.build();
+  }
+
+  public static CacheDirectiveInfo convert(CacheDirectiveInfoProto proto) {
+    CacheDirectiveInfo.Builder builder =
+        new CacheDirectiveInfo.Builder();
+    if (proto.hasId()) {
+      builder.setId(proto.getId());
+    }
+    if (proto.hasPath()) {
+      builder.setPath(new Path(proto.getPath()));
+    }
+    if (proto.hasReplication()) {
+      builder.setReplication(Shorts.checkedCast(
+          proto.getReplication()));
+    }
+    if (proto.hasPool()) {
+      builder.setPool(proto.getPool());
+    }
+    if (proto.hasExpiration()) {
+      builder.setExpiration(convert(proto.getExpiration()));
+    }
+    return builder.build();
+  }
+
+  public static CacheDirectiveInfo.Expiration convert(
+      CacheDirectiveInfoExpirationProto proto) {
+    if (proto.getIsRelative()) {
+      return CacheDirectiveInfo.Expiration.newRelative(proto.getMillis());
+    }
+    return CacheDirectiveInfo.Expiration.newAbsolute(proto.getMillis());
+  }
+
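For reference, a small round-trip sketch for the two expiration converters above: only a millisecond value and a relative/absolute flag cross the wire. It assumes the hadoop-hdfs-client artifact, and the proto import path is a best guess.

import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

public class ExpirationRoundTrip {
  public static void main(String[] args) {
    // A relative expiration of five minutes, converted to PB and back.
    CacheDirectiveInfo.Expiration before =
        CacheDirectiveInfo.Expiration.newRelative(5 * 60 * 1000L);
    CacheDirectiveInfoExpirationProto proto = PBHelperClient.convert(before);
    CacheDirectiveInfo.Expiration after = PBHelperClient.convert(proto);
    System.out.println(after.isRelative() + " " + after.getMillis());  // true 300000
  }
}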
+  public static int convertCacheFlags(EnumSet<CacheFlag> flags) {
+    int value = 0;
+    if (flags.contains(CacheFlag.FORCE)) {
+      value |= CacheFlagProto.FORCE.getNumber();
+    }
+    return value;
+  }
+
+  public static SnapshotDiffReport convert(SnapshotDiffReportProto reportProto) {
+    if (reportProto == null) {
+      return null;
+    }
+    String snapshotDir = reportProto.getSnapshotRoot();
+    String fromSnapshot = reportProto.getFromSnapshot();
+    String toSnapshot = reportProto.getToSnapshot();
+    List<SnapshotDiffReportEntryProto> list = reportProto
+        .getDiffReportEntriesList();
+    List<DiffReportEntry> entries = new ArrayList<>();
+    for (SnapshotDiffReportEntryProto entryProto : list) {
+      DiffReportEntry entry = convert(entryProto);
+      if (entry != null)
+        entries.add(entry);
+    }
+    return new SnapshotDiffReport(snapshotDir, fromSnapshot, toSnapshot,
+        entries);
+  }
+
+  public static DiffReportEntry convert(SnapshotDiffReportEntryProto entry) {
+    if (entry == null) {
+      return null;
+    }
+    DiffType type = DiffType.getTypeFromLabel(entry
+        .getModificationLabel());
+    return type == null ? null : new DiffReportEntry(type, entry.getFullpath()
+        .toByteArray(), entry.hasTargetPath() ? entry.getTargetPath()
+        .toByteArray() : null);
+  }
+
+  public static SnapshottableDirectoryStatus[] convert(
+      SnapshottableDirectoryListingProto sdlp) {
+    if (sdlp == null)
+      return null;
+    List<SnapshottableDirectoryStatusProto> list = sdlp
+        .getSnapshottableDirListingList();
+    if (list.isEmpty()) {
+      return new SnapshottableDirectoryStatus[0];
+    } else {
+      SnapshottableDirectoryStatus[] result =
+          new SnapshottableDirectoryStatus[list.size()];
+      for (int i = 0; i < list.size(); i++) {
+        result[i] = convert(list.get(i));
+      }
+      return result;
+    }
+  }
+
+  public static SnapshottableDirectoryStatus convert(
+      SnapshottableDirectoryStatusProto sdirStatusProto) {
+    if (sdirStatusProto == null) {
+      return null;
+    }
+    final HdfsFileStatusProto status = sdirStatusProto.getDirStatus();
+    return new SnapshottableDirectoryStatus(
+        status.getModificationTime(),
+        status.getAccessTime(),
+        convert(status.getPermission()),
+        status.getOwner(),
+        status.getGroup(),
+        status.getPath().toByteArray(),
+        status.getFileId(),
+        status.getChildrenNum(),
+        sdirStatusProto.getSnapshotNumber(),
+        sdirStatusProto.getSnapshotQuota(),
+        sdirStatusProto.getParentFullpath().toByteArray());
+  }
+
+  // DataEncryptionKey
+  public static DataEncryptionKey convert(DataEncryptionKeyProto bet) {
+    String encryptionAlgorithm = bet.getEncryptionAlgorithm();
+    return new DataEncryptionKey(bet.getKeyId(),
+        bet.getBlockPoolId(),
+        bet.getNonce().toByteArray(),
+        bet.getEncryptionKey().toByteArray(),
+        bet.getExpiryDate(),
+        encryptionAlgorithm.isEmpty() ? null : encryptionAlgorithm);
+  }
+
+  public static Token<DelegationTokenIdentifier> convertDelegationToken(
+      TokenProto blockToken) {
+    return new Token<>(blockToken.getIdentifier()
+        .toByteArray(), blockToken.getPassword().toByteArray(), new Text(
+        blockToken.getKind()), new Text(blockToken.getService()));
+  }
+
+  // Arrays of DatanodeId
+  public static DatanodeIDProto[] convert(DatanodeID[] did) {
+    if (did == null)
+      return null;
+    final int len = did.length;
+    DatanodeIDProto[] result = new DatanodeIDProto[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = convert(did[i]);
+    }
+    return result;
+  }
+
+  public static FsPermissionProto convert(FsPermission p) {
+    return FsPermissionProto.newBuilder().setPerm(p.toExtendedShort()).build();
+  }
+
+  public static HdfsFileStatus convert(HdfsFileStatusProto fs) {
+    if (fs == null)
+      return null;
+    return new HdfsLocatedFileStatus(
+        fs.getLength(), fs.getFileType().equals(FileType.IS_DIR),
+        fs.getBlockReplication(), fs.getBlocksize(),
+        fs.getModificationTime(), fs.getAccessTime(),
+        convert(fs.getPermission()), fs.getOwner(), fs.getGroup(),
+        fs.getFileType().equals(FileType.IS_SYMLINK) ?
+            fs.getSymlink().toByteArray() : null,
+        fs.getPath().toByteArray(),
+        fs.hasFileId()? fs.getFileId(): HdfsConstants.GRANDFATHER_INODE_ID,
+        fs.hasLocations() ? convert(fs.getLocations()) : null,
+        fs.hasChildrenNum() ? fs.getChildrenNum() : -1,
+        fs.hasFileEncryptionInfo() ? convert(fs.getFileEncryptionInfo()) : null,
+        fs.hasStoragePolicy() ? (byte) fs.getStoragePolicy()
+            : HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED,
+    fs.hasEcPolicy() ? convertErasureCodingPolicy(fs.getEcPolicy()) : null);
+  }
+
+  public static CorruptFileBlocks convert(CorruptFileBlocksProto c) {
+    if (c == null)
+      return null;
+    List<String> fileList = c.getFilesList();
+    return new CorruptFileBlocks(fileList.toArray(new String[fileList.size()]),
+        c.getCookie());
+  }
+
+  public static ContentSummary convert(ContentSummaryProto cs) {
+    if (cs == null) return null;
+    ContentSummary.Builder builder = new ContentSummary.Builder();
+    builder.length(cs.getLength()).
+        fileCount(cs.getFileCount()).
+        directoryCount(cs.getDirectoryCount()).
+        quota(cs.getQuota()).
+        spaceConsumed(cs.getSpaceConsumed()).
+        spaceQuota(cs.getSpaceQuota());
+    if (cs.hasTypeQuotaInfos()) {
+      for (HdfsProtos.StorageTypeQuotaInfoProto info :
+          cs.getTypeQuotaInfos().getTypeQuotaInfoList()) {
+        StorageType type = convertStorageType(info.getType());
+        builder.typeConsumed(type, info.getConsumed());
+        builder.typeQuota(type, info.getQuota());
+      }
+    }
+    return builder.build();
+  }
+
+  public static RollingUpgradeActionProto convert(RollingUpgradeAction a) {
+    switch (a) {
+    case QUERY:
+      return RollingUpgradeActionProto.QUERY;
+    case PREPARE:
+      return RollingUpgradeActionProto.START;
+    case FINALIZE:
+      return RollingUpgradeActionProto.FINALIZE;
+    default:
+      throw new IllegalArgumentException("Unexpected value: " + a);
+    }
+  }
+
+  public static RollingUpgradeInfo convert(RollingUpgradeInfoProto proto) {
+    RollingUpgradeStatusProto status = proto.getStatus();
+    return new RollingUpgradeInfo(status.getBlockPoolId(),
+        proto.getCreatedRollbackImages(),
+        proto.getStartTime(), proto.getFinalizeTime());
+  }
+
+  public static DatanodeStorageReport[] convertDatanodeStorageReports(
+      List<DatanodeStorageReportProto> protos) {
+    final DatanodeStorageReport[] reports
+        = new DatanodeStorageReport[protos.size()];
+    for(int i = 0; i < reports.length; i++) {
+      reports[i] = convertDatanodeStorageReport(protos.get(i));
+    }
+    return reports;
+  }
+
+  public static DatanodeStorageReport convertDatanodeStorageReport(
+      DatanodeStorageReportProto proto) {
+    return new DatanodeStorageReport(
+        convert(proto.getDatanodeInfo()),
+        convertStorageReports(proto.getStorageReportsList()));
+  }
+
+  public static StorageReport[] convertStorageReports(
+      List<StorageReportProto> list) {
+    final StorageReport[] report = new StorageReport[list.size()];
+    for (int i = 0; i < report.length; i++) {
+      report[i] = convert(list.get(i));
+    }
+    return report;
+  }
+
+  public static StorageReport convert(StorageReportProto p) {
+    return new StorageReport(
+        p.hasStorage() ?
+            convert(p.getStorage()) :
+            new DatanodeStorage(p.getStorageUuid()),
+        p.getFailed(), p.getCapacity(), p.getDfsUsed(), p.getRemaining(),
+        p.getBlockPoolUsed());
+  }
+
+  public static DatanodeStorage convert(DatanodeStorageProto s) {
+    return new DatanodeStorage(s.getStorageUuid(),
+        convertState(s.getState()), convertStorageType(s.getStorageType()));
+  }
+
+  private static State convertState(StorageState state) {
+    switch(state) {
+    case READ_ONLY_SHARED:
+      return State.READ_ONLY_SHARED;
+    case NORMAL:
+    default:
+      return State.NORMAL;
+    }
+  }
+
+  public static SafeModeActionProto convert(
+      SafeModeAction a) {
+    switch (a) {
+    case SAFEMODE_LEAVE:
+      return SafeModeActionProto.SAFEMODE_LEAVE;
+    case SAFEMODE_ENTER:
+      return SafeModeActionProto.SAFEMODE_ENTER;
+    case SAFEMODE_GET:
+      return SafeModeActionProto.SAFEMODE_GET;
+    default:
+      throw new IllegalArgumentException("Unexpected SafeModeAction :" + a);
+    }
+  }
+
+  public static DatanodeInfo[] convert(List<DatanodeInfoProto> list) {
+    DatanodeInfo[] info = new DatanodeInfo[list.size()];
+    for (int i = 0; i < info.length; i++) {
+      info[i] = convert(list.get(i));
+    }
+    return info;
+  }
+
+  public static long[] convert(GetFsStatsResponseProto res) {
+    long[] result = new long[7];
+    result[ClientProtocol.GET_STATS_CAPACITY_IDX] = res.getCapacity();
+    result[ClientProtocol.GET_STATS_USED_IDX] = res.getUsed();
+    result[ClientProtocol.GET_STATS_REMAINING_IDX] = res.getRemaining();
+    result[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = res.getUnderReplicated();
+    result[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = res.getCorruptBlocks();
+    result[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = res.getMissingBlocks();
+    result[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX] =
+        res.getMissingReplOneBlocks();
+    return result;
+  }
+
+  public static DatanodeReportTypeProto
+    convert(DatanodeReportType t) {
+    switch (t) {
+    case ALL: return DatanodeReportTypeProto.ALL;
+    case LIVE: return DatanodeReportTypeProto.LIVE;
+    case DEAD: return DatanodeReportTypeProto.DEAD;
+    case DECOMMISSIONING: return DatanodeReportTypeProto.DECOMMISSIONING;
+    default:
+      throw new IllegalArgumentException("Unexpected data type report:" + t);
+    }
+  }
+
+  public static DirectoryListing convert(DirectoryListingProto dl) {
+    if (dl == null)
+      return null;
+    List<HdfsFileStatusProto> partList =  dl.getPartialListingList();
+    return new DirectoryListing(partList.isEmpty() ?
+        new HdfsLocatedFileStatus[0] :
+        convert(partList.toArray(new HdfsFileStatusProto[partList.size()])),
+        dl.getRemainingEntries());
+  }
+
+  public static HdfsFileStatus[] convert(HdfsFileStatusProto[] fs) {
+    if (fs == null) return null;
+    final int len = fs.length;
+    HdfsFileStatus[] result = new HdfsFileStatus[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = convert(fs[i]);
+    }
+    return result;
+  }
+
+  // The createFlag field in PB is a bitmask whose values are the same as the
+  // enum values of CreateFlag.
+  public static int convertCreateFlag(EnumSetWritable<CreateFlag> flag) {
+    int value = 0;
+    if (flag.contains(CreateFlag.APPEND)) {
+      value |= CreateFlagProto.APPEND.getNumber();
+    }
+    if (flag.contains(CreateFlag.CREATE)) {
+      value |= CreateFlagProto.CREATE.getNumber();
+    }
+    if (flag.contains(CreateFlag.OVERWRITE)) {
+      value |= CreateFlagProto.OVERWRITE.getNumber();
+    }
+    if (flag.contains(CreateFlag.LAZY_PERSIST)) {
+      value |= CreateFlagProto.LAZY_PERSIST.getNumber();
+    }
+    if (flag.contains(CreateFlag.NEW_BLOCK)) {
+      value |= CreateFlagProto.NEW_BLOCK.getNumber();
+    }
+    return value;
+  }
+
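For reference, a round-trip sketch through convertCreateFlag: the overload above packs the flags into the wire bitmask, and the int overload added later in this file unpacks it. Import paths are assumptions; this snippet is illustrative and not part of the change.

import java.util.EnumSet;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.io.EnumSetWritable;

public class CreateFlagRoundTrip {
  public static void main(String[] args) {
    EnumSetWritable<CreateFlag> flags = new EnumSetWritable<>(
        EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), CreateFlag.class);
    int mask = PBHelperClient.convertCreateFlag(flags);   // pack into the PB bitmask
    EnumSetWritable<CreateFlag> back = PBHelperClient.convertCreateFlag(mask);
    System.out.println(back.get());  // [CREATE, OVERWRITE]
  }
}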
+  public static FsServerDefaults convert(FsServerDefaultsProto fs) {
+    if (fs == null) return null;
+    return new FsServerDefaults(
+        fs.getBlockSize(), fs.getBytesPerChecksum(),
+        fs.getWritePacketSize(), (short) fs.getReplication(),
+        fs.getFileBufferSize(),
+        fs.getEncryptDataTransfer(),
+        fs.getTrashInterval(),
+        convert(fs.getChecksumType()));
+  }
+
+  public static List<CryptoProtocolVersionProto> convert(
+      CryptoProtocolVersion[] versions) {
+    List<CryptoProtocolVersionProto> protos =
+        Lists.newArrayListWithCapacity(versions.length);
+    for (CryptoProtocolVersion v: versions) {
+      protos.add(convert(v));
+    }
+    return protos;
+  }
+
+  static List<StorageTypesProto> convert(StorageType[][] types) {
+    List<StorageTypesProto> list = Lists.newArrayList();
+    if (types != null) {
+      for (StorageType[] ts : types) {
+        StorageTypesProto.Builder builder = StorageTypesProto.newBuilder();
+        builder.addAllStorageTypes(convertStorageTypes(ts));
+        list.add(builder.build());
+      }
+    }
+    return list;
+  }
+
+  public static BlockStoragePolicyProto convert(BlockStoragePolicy policy) {
+    BlockStoragePolicyProto.Builder builder = BlockStoragePolicyProto
+        .newBuilder().setPolicyId(policy.getId()).setName(policy.getName());
+    // creation storage types
+    StorageTypesProto creationProto = convert(policy.getStorageTypes());
+    Preconditions.checkArgument(creationProto != null);
+    builder.setCreationPolicy(creationProto);
+    // creation fallback
+    StorageTypesProto creationFallbackProto = convert(
+        policy.getCreationFallbacks());
+    if (creationFallbackProto != null) {
+      builder.setCreationFallbackPolicy(creationFallbackProto);
+    }
+    // replication fallback
+    StorageTypesProto replicationFallbackProto = convert(
+        policy.getReplicationFallbacks());
+    if (replicationFallbackProto != null) {
+      builder.setReplicationFallbackPolicy(replicationFallbackProto);
+    }
+    return builder.build();
+  }
+
+  public static StorageTypesProto convert(StorageType[] types) {
+    if (types == null || types.length == 0) {
+      return null;
+    }
+    List<StorageTypeProto> list = convertStorageTypes(types);
+    return StorageTypesProto.newBuilder().addAllStorageTypes(list).build();
+  }
+
+  public static DatanodeID[] convert(DatanodeIDProto[] did) {
+    if (did == null) return null;
+    final int len = did.length;
+    DatanodeID[] result = new DatanodeID[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = convert(did[i]);
+    }
+    return result;
+  }
+
+  // Block
+  public static BlockProto convert(Block b) {
+    return BlockProto.newBuilder().setBlockId(b.getBlockId())
+        .setGenStamp(b.getGenerationStamp()).setNumBytes(b.getNumBytes())
+        .build();
+  }
+
+  public static Block convert(BlockProto b) {
+    return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
+  }
+
+  static public DatanodeInfo[] convert(DatanodeInfoProto di[]) {
+    if (di == null) return null;
+    DatanodeInfo[] result = new DatanodeInfo[di.length];
+    for (int i = 0; i < di.length; i++) {
+      result[i] = convert(di[i]);
+    }
+    return result;
+  }
+
+  public static DatanodeStorageReportProto convertDatanodeStorageReport(
+      DatanodeStorageReport report) {
+    return DatanodeStorageReportProto.newBuilder()
+        .setDatanodeInfo(convert(report.getDatanodeInfo()))
+        .addAllStorageReports(convertStorageReports(report.getStorageReports()))
+        .build();
+  }
+
+  public static List<DatanodeStorageReportProto> convertDatanodeStorageReports(
+      DatanodeStorageReport[] reports) {
+    final List<DatanodeStorageReportProto> protos
+        = new ArrayList<>(reports.length);
+    for(int i = 0; i < reports.length; i++) {
+      protos.add(convertDatanodeStorageReport(reports[i]));
+    }
+    return protos;
+  }
+
+  public static LocatedBlock[] convertLocatedBlock(LocatedBlockProto[] lb) {
+    if (lb == null) return null;
+    return convertLocatedBlock(Arrays.asList(lb)).toArray(
+        new LocatedBlock[lb.length]);
+  }
+
+  public static LocatedBlocksProto convert(LocatedBlocks lb) {
+    if (lb == null) {
+      return null;
+    }
+    LocatedBlocksProto.Builder builder =
+        LocatedBlocksProto.newBuilder();
+    if (lb.getLastLocatedBlock() != null) {
+      builder.setLastBlock(
+          convertLocatedBlock(lb.getLastLocatedBlock()));
+    }
+    if (lb.getFileEncryptionInfo() != null) {
+      builder.setFileEncryptionInfo(convert(lb.getFileEncryptionInfo()));
+    }
+    if (lb.getErasureCodingPolicy() != null) {
+      builder.setEcPolicy(convertErasureCodingPolicy(lb.getErasureCodingPolicy()));
+    }
+    return builder.setFileLength(lb.getFileLength())
+        .setUnderConstruction(lb.isUnderConstruction())
+        .addAllBlocks(convertLocatedBlocks2(lb.getLocatedBlocks()))
+        .setIsLastBlockComplete(lb.isLastBlockComplete()).build();
+  }
+
+  public static DataEncryptionKeyProto convert(DataEncryptionKey bet) {
+    DataEncryptionKeyProto.Builder b = DataEncryptionKeyProto.newBuilder()
+        .setKeyId(bet.keyId)
+        .setBlockPoolId(bet.blockPoolId)
+        .setNonce(ByteString.copyFrom(bet.nonce))
+        .setEncryptionKey(ByteString.copyFrom(bet.encryptionKey))
+        .setExpiryDate(bet.expiryDate);
+    if (bet.encryptionAlgorithm != null) {
+      b.setEncryptionAlgorithm(bet.encryptionAlgorithm);
+    }
+    return b.build();
+  }
+
+  public static FsServerDefaultsProto convert(FsServerDefaults fs) {
+    if (fs == null) return null;
+    return FsServerDefaultsProto.newBuilder().
+      setBlockSize(fs.getBlockSize()).
+      setBytesPerChecksum(fs.getBytesPerChecksum()).
+      setWritePacketSize(fs.getWritePacketSize())
+      .setReplication(fs.getReplication())
+      .setFileBufferSize(fs.getFileBufferSize())
+      .setEncryptDataTransfer(fs.getEncryptDataTransfer())
+      .setTrashInterval(fs.getTrashInterval())
+      .setChecksumType(convert(fs.getChecksumType()))
+      .build();
+  }
+
+  public static EnumSetWritable<CreateFlag> convertCreateFlag(int flag) {
+    EnumSet<CreateFlag> result =
+       EnumSet.noneOf(CreateFlag.class);
+    if ((flag & CreateFlagProto.APPEND_VALUE) == CreateFlagProto.APPEND_VALUE) {
+      result.add(CreateFlag.APPEND);
+    }
+    if ((flag & CreateFlagProto.CREATE_VALUE) == CreateFlagProto.CREATE_VALUE) {
+      result.add(CreateFlag.CREATE);
+    }
+    if ((flag & CreateFlagProto.OVERWRITE_VALUE)
+        == CreateFlagProto.OVERWRITE_VALUE) {
+      result.add(CreateFlag.OVERWRITE);
+    }
+    if ((flag & CreateFlagProto.LAZY_PERSIST_VALUE)
+        == CreateFlagProto.LAZY_PERSIST_VALUE) {
+      result.add(CreateFlag.LAZY_PERSIST);
+    }
+    if ((flag & CreateFlagProto.NEW_BLOCK_VALUE)
+        == CreateFlagProto.NEW_BLOCK_VALUE) {
+      result.add(CreateFlag.NEW_BLOCK);
+    }
+    return new EnumSetWritable<CreateFlag>(result, CreateFlag.class);
+  }
+
+  public static EnumSet<CacheFlag> convertCacheFlags(int flags) {
+    EnumSet<CacheFlag> result = EnumSet.noneOf(CacheFlag.class);
+    if ((flags & CacheFlagProto.FORCE_VALUE) == CacheFlagProto.FORCE_VALUE) {
+      result.add(CacheFlag.FORCE);
+    }
+    return result;
+  }
+
+  public static HdfsFileStatusProto convert(HdfsFileStatus fs) {
+    if (fs == null)
+      return null;
+    FileType fType = FileType.IS_FILE;
+    if (fs.isDir()) {
+      fType = FileType.IS_DIR;
+    } else if (fs.isSymlink()) {
+      fType = FileType.IS_SYMLINK;
+    }
+
+    HdfsFileStatusProto.Builder builder =
+     HdfsFileStatusProto.newBuilder().
+      setLength(fs.getLen()).
+      setFileType(fType).
+      setBlockReplication(fs.getReplication()).
+      setBlocksize(fs.getBlockSize()).
+      setModificationTime(fs.getModificationTime()).
+      setAccessTime(fs.getAccessTime()).
+      setPermission(convert(fs.getPermission())).
+      setOwner(fs.getOwner()).
+      setGroup(fs.getGroup()).
+      setFileId(fs.getFileId()).
+      setChildrenNum(fs.getChildrenNum()).
+      setPath(ByteString.copyFrom(fs.getLocalNameInBytes())).
+      setStoragePolicy(fs.getStoragePolicy());
+    if (fs.isSymlink())  {
+      builder.setSymlink(ByteString.copyFrom(fs.getSymlinkInBytes()));
+    }
+    if (fs.getFileEncryptionInfo() != null) {
+      builder.setFileEncryptionInfo(convert(fs.getFileEncryptionInfo()));
+    }
+    if (fs instanceof HdfsLocatedFileStatus) {
+      final HdfsLocatedFileStatus lfs = (HdfsLocatedFileStatus) fs;
+      LocatedBlocks locations = lfs.getBlockLocations();
+      if (locations != null) {
+        builder.setLocations(convert(locations));
+      }
+    }
+    if (fs.getErasureCodingPolicy() != null) {
+      builder.setEcPolicy(convertErasureCodingPolicy(fs.getErasureCodingPolicy()));
+    }
+    return builder.build();
+  }
+
+  public static SnapshottableDirectoryStatusProto convert(
+      SnapshottableDirectoryStatus status) {
+    if (status == null) {
+      return null;
+    }
+    int snapshotNumber = status.getSnapshotNumber();
+    int snapshotQuota = status.getSnapshotQuota();
+    byte[] parentFullPath = status.getParentFullPath();
+    ByteString parentFullPathBytes = ByteString.copyFrom(
+        parentFullPath == null ? DFSUtilClient.EMPTY_BYTES : parentFullPath);
+    HdfsFileStatusProto fs = convert(status.getDirStatus());
+    SnapshottableDirectoryStatusProto.Builder builder =
+        SnapshottableDirectoryStatusProto
+        .newBuilder().setSnapshotNumber(snapshotNumber)
+        .setSnapshotQuota(snapshotQuota).setParentFullpath(parentFullPathBytes)
+        .setDirStatus(fs);
+    return builder.build();
+  }
+
+  public static HdfsFileStatusProto[] convert(HdfsFileStatus[] fs) {
+    if (fs == null) return null;
+    final int len = fs.length;
+    HdfsFileStatusProto[] result = new HdfsFileStatusProto[len];
+    for (int i = 0; i < len; ++i) {
+      result[i] = convert(fs[i]);
+    }
+    return result;
+  }
+
+  public static DirectoryListingProto convert(DirectoryListing d) {
+    if (d == null)
+      return null;
+    return DirectoryListingProto.newBuilder().
+        addAllPartialListing(Arrays.asList(
+            convert(d.getPartialListing()))).
+        setRemainingEntries(d.getRemainingEntries()).
+        build();
+  }
+
+  public static GetFsStatsResponseProto convert(long[] fsStats) {
+    GetFsStatsResponseProto.Builder result = GetFsStatsResponseProto
+        .newBuilder();
+    if (fsStats.length >= ClientProtocol.GET_STATS_CAPACITY_IDX + 1)
+      result.setCapacity(fsStats[ClientProtocol.GET_STATS_CAPACITY_IDX]);
+    if (fsStats.length >= ClientProtocol.GET_STATS_USED_IDX + 1)
+      result.setUsed(fsStats[ClientProtocol.GET_STATS_USED_IDX]);
+    if (fsStats.length >= ClientProtocol.GET_STATS_REMAINING_IDX + 1)
+      result.setRemaining(fsStats[ClientProtocol.GET_STATS_REMAINING_IDX]);
+    if (fsStats.length >= ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX + 1)
+      result.setUnderReplicated(
+              fsStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
+    if (fsStats.length >= ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX + 1)
+      result.setCorruptBlocks(
+          fsStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]);
+    if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX + 1)
+      result.setMissingBlocks(
+          fsStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]);
+    if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX + 1)
+      result.setMissingReplOneBlocks(
+          fsStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX]);
+    return result.build();
+  }
+
+  public static DatanodeReportType convert(DatanodeReportTypeProto t) {
+    switch (t) {
+    case ALL: return DatanodeReportType.ALL;
+    case LIVE: return DatanodeReportType.LIVE;
+    case DEAD: return DatanodeReportType.DEAD;
+    case DECOMMISSIONING: return DatanodeReportType.DECOMMISSIONING;
+    default:
+      throw new IllegalArgumentException("Unexpected data type report:" + t);
+    }
+  }
+
+  public static SafeModeAction convert(
+      SafeModeActionProto a) {
+    switch (a) {
+    case SAFEMODE_LEAVE:
+      return SafeModeAction.SAFEMODE_LEAVE;
+    case SAFEMODE_ENTER:
+      return SafeModeAction.SAFEMODE_ENTER;
+    case SAFEMODE_GET:
+      return SafeModeAction.SAFEMODE_GET;
+    default:
+      throw new IllegalArgumentException("Unexpected SafeModeAction :" + a);
+    }
+  }
+
+  public static RollingUpgradeAction convert(RollingUpgradeActionProto a) {
+    switch (a) {
+    case QUERY:
+      return RollingUpgradeAction.QUERY;
+    case START:
+      return RollingUpgradeAction.PREPARE;
+    case FINALIZE:
+      return RollingUpgradeAction.FINALIZE;
+    default:
+      throw new IllegalArgumentException("Unexpected value: " + a);
+    }
+  }
+
+  public static RollingUpgradeStatusProto convertRollingUpgradeStatus(
+      RollingUpgradeStatus status) {
+    return RollingUpgradeStatusProto.newBuilder()
+        .setBlockPoolId(status.getBlockPoolId())
+        .setFinalized(status.isFinalized())
+        .build();
+  }
+
+  public static RollingUpgradeStatus convert(RollingUpgradeStatusProto proto) {
+    return new RollingUpgradeStatus(proto.getBlockPoolId(),
+        proto.getFinalized());
+  }
+
+  public static RollingUpgradeInfoProto convert(RollingUpgradeInfo info) {
+    return RollingUpgradeInfoProto.newBuilder()
+        .setStatus(convertRollingUpgradeStatus(info))
+        .setCreatedRollbackImages(info.createdRollbackImages())
+        .setStartTime(info.getStartTime())
+        .setFinalizeTime(info.getFinalizeTime())
+        .build();
+  }
+
+  public static CorruptFileBlocksProto convert(CorruptFileBlocks c) {
+    if (c == null)
+      return null;
+    return CorruptFileBlocksProto.newBuilder().
+        addAllFiles(Arrays.asList(c.getFiles())).
+        setCookie(c.getCookie()).
+        build();
+  }
+
+  public static ContentSummaryProto convert(ContentSummary cs) {
+    if (cs == null) return null;
+    ContentSummaryProto.Builder builder = ContentSummaryProto.newBuilder();
+    builder.setLength(cs.getLength()).
+        setFileCount(cs.getFileCount()).
+        setDirectoryCount(cs.getDirectoryCount()).
+        setQuota(cs.getQuota()).
+        setSpaceConsumed(cs.getSpaceConsumed()).
+        setSpaceQuota(cs.getSpaceQuota());
+
+    if (cs.isTypeQuotaSet() || cs.isTypeConsumedAvailable()) {
+      HdfsProtos.StorageTypeQuotaInfosProto.Builder isb =
+          HdfsProtos.StorageTypeQuotaInfosProto.newBuilder();
+      for (StorageType t: StorageType.getTypesSupportingQuota()) {
+        HdfsProtos.StorageTypeQuotaInfoProto info =
+            HdfsProtos.StorageTypeQuotaInfoProto.newBuilder().
+                setType(convertStorageType(t)).
+                setConsumed(cs.getTypeConsumed(t)).
+                setQuota(cs.getTypeQuota(t)).
+                build();
+        isb.addTypeQuotaInfo(info);
+      }
+      builder.setTypeQuotaInfos(isb);
+    }
+    return builder.build();
+  }
+
+  public static DatanodeStorageProto convert(DatanodeStorage s) {
+    return DatanodeStorageProto.newBuilder()
+        .setState(convertState(s.getState()))
+        .setStorageType(convertStorageType(s.getStorageType()))
+        .setStorageUuid(s.getStorageID()).build();
+  }
+
+  private static StorageState convertState(State state) {
+    switch(state) {
+    case READ_ONLY_SHARED:
+      return StorageState.READ_ONLY_SHARED;
+    case NORMAL:
+    default:
+      return StorageState.NORMAL;
+    }
+  }
+
+  public static StorageReportProto convert(StorageReport r) {
+    StorageReportProto.Builder builder = StorageReportProto.newBuilder()
+        .setBlockPoolUsed(r.getBlockPoolUsed()).setCapacity(r.getCapacity())
+        .setDfsUsed(r.getDfsUsed()).setRemaining(r.getRemaining())
+        .setStorageUuid(r.getStorage().getStorageID())
+        .setStorage(convert(r.getStorage()));
+    return builder.build();
+  }
+
+  public static List<StorageReportProto> convertStorageReports(StorageReport[] storages) {
+    final List<StorageReportProto> protos = new ArrayList<StorageReportProto>(
+        storages.length);
+    for(int i = 0; i < storages.length; i++) {
+      protos.add(convert(storages[i]));
+    }
+    return protos;
+  }
+
+  public static SnapshottableDirectoryListingProto convert(
+      SnapshottableDirectoryStatus[] status) {
+    if (status == null)
+      return null;
+    SnapshottableDirectoryStatusProto[] protos =
+        new SnapshottableDirectoryStatusProto[status.length];
+    for (int i = 0; i < status.length; i++) {
+      protos[i] = convert(status[i]);
+    }
+    List<SnapshottableDirectoryStatusProto> protoList = Arrays.asList(protos);
+    return SnapshottableDirectoryListingProto.newBuilder()
+        .addAllSnapshottableDirListing(protoList).build();
+  }
+
+  public static SnapshotDiffReportEntryProto convert(DiffReportEntry entry) {
+    if (entry == null) {
+      return null;
+    }
+    ByteString sourcePath = ByteString
+        .copyFrom(entry.getSourcePath() == null ? DFSUtilClient.EMPTY_BYTES : entry
+            .getSourcePath());
+    String modification = entry.getType().getLabel();
+    SnapshotDiffReportEntryProto.Builder builder = SnapshotDiffReportEntryProto
+        .newBuilder().setFullpath(sourcePath)
+        .setModificationLabel(modification);
+    if (entry.getType() == DiffType.RENAME) {
+      ByteString targetPath = ByteString
+          .copyFrom(entry.getTargetPath() == null ? DFSUtilClient.EMPTY_BYTES : entry
+              .getTargetPath());
+      builder.setTargetPath(targetPath);
+    }
+    return builder.build();
+  }
+
+  public static SnapshotDiffReportProto convert(SnapshotDiffReport report) {
+    if (report == null) {
+      return null;
+    }
+    List<DiffReportEntry> entries = report.getDiffList();
+    List<SnapshotDiffReportEntryProto> entryProtos = new ArrayList<>();
+    for (DiffReportEntry entry : entries) {
+      SnapshotDiffReportEntryProto entryProto = convert(entry);
+      if (entryProto != null)
+        entryProtos.add(entryProto);
+    }
+
+    SnapshotDiffReportProto reportProto = SnapshotDiffReportProto.newBuilder()
+        .setSnapshotRoot(report.getSnapshotRoot())
+        .setFromSnapshot(report.getFromSnapshot())
+        .setToSnapshot(report.getLaterSnapshotName())
+        .addAllDiffReportEntries(entryProtos).build();
+    return reportProto;
+  }
+
+  public static CacheDirectiveStatsProto convert(CacheDirectiveStats stats) {
+    CacheDirectiveStatsProto.Builder builder =
+        CacheDirectiveStatsProto.newBuilder();
+    builder.setBytesNeeded(stats.getBytesNeeded());
+    builder.setBytesCached(stats.getBytesCached());
+    builder.setFilesNeeded(stats.getFilesNeeded());
+    builder.setFilesCached(stats.getFilesCached());
+    builder.setHasExpired(stats.hasExpired());
+    return builder.build();
+  }
+
+  public static CacheDirectiveEntryProto convert(CacheDirectiveEntry entry) {
+    CacheDirectiveEntryProto.Builder builder =
+        CacheDirectiveEntryProto.newBuilder();
+    builder.setInfo(convert(entry.getInfo()));
+    builder.setStats(convert(entry.getStats()));
+    return builder.build();
+  }
+
+  public static boolean[] convertBooleanList(
+    List<Boolean> targetPinningsList) {
+    final boolean[] targetPinnings = new boolean[targetPinningsList.size()];
+    for (int i = 0; i < targetPinningsList.size(); i++) {
+      targetPinnings[i] = targetPinningsList.get(i);
+    }
+    return targetPinnings;
+  }
+
+  public static CachePoolStatsProto convert(CachePoolStats stats) {
+    CachePoolStatsProto.Builder builder = CachePoolStatsProto.newBuilder();
+    builder.setBytesNeeded(stats.getBytesNeeded());
+    builder.setBytesCached(stats.getBytesCached());
+    builder.setBytesOverlimit(stats.getBytesOverlimit());
+    builder.setFilesNeeded(stats.getFilesNeeded());
+    builder.setFilesCached(stats.getFilesCached());
+    return builder.build();
+  }
+
+  public static CachePoolEntryProto convert(CachePoolEntry entry) {
+    CachePoolEntryProto.Builder builder = CachePoolEntryProto.newBuilder();
+    builder.setInfo(convert(entry.getInfo()));
+    builder.setStats(convert(entry.getStats()));
+    return builder.build();
+  }
+
+  public static DatanodeLocalInfoProto convert(DatanodeLocalInfo info) {
+    DatanodeLocalInfoProto.Builder builder = DatanodeLocalInfoProto.newBuilder();
+    builder.setSoftwareVersion(info.getSoftwareVersion());
+    builder.setConfigVersion(info.getConfigVersion());
+    builder.setUptime(info.getUptime());
+    return builder.build();
+  }
+
+  public static GetAclStatusResponseProto convert(AclStatus e) {
+    AclStatusProto.Builder builder = AclStatusProto.newBuilder();
+    builder.setOwner(e.getOwner())
+        .setGroup(e.getGroup()).setSticky(e.isStickyBit())
+        .addAllEntries(convertAclEntryProto(e.getEntries()));
+    if (e.getPermission() != null) {
+      builder.setPermission(convert(e.getPermission()));
+    }
+    AclStatusProto r = builder.build();
+    return GetAclStatusResponseProto.newBuilder().setResult(r).build();
+  }
+
+  public static EnumSet<XAttrSetFlag> convert(int flag) {
+    EnumSet<XAttrSetFlag> result =
+        EnumSet.noneOf(XAttrSetFlag.class);
+    if ((flag & XAttrSetFlagProto.XATTR_CREATE_VALUE) ==
+        XAttrSetFlagProto.XATTR_CREATE_VALUE) {
+      result.add(XAttrSetFlag.CREATE);
+    }
+    if ((flag & XAttrSetFlagProto.XATTR_REPLACE_VALUE) ==
+        XAttrSetFlagProto.XATTR_REPLACE_VALUE) {
+      result.add(XAttrSetFlag.REPLACE);
+    }
+    return result;
+  }
+
+  public static XAttr convertXAttr(XAttrProto a) {
+    XAttr.Builder builder = new XAttr.Builder();
+    builder.setNameSpace(convert(a.getNamespace()));
+    if (a.hasName()) {
+      builder.setName(a.getName());
+    }
+    if (a.hasValue()) {
+      builder.setValue(a.getValue().toByteArray());
+    }
+    return builder.build();
+  }
+
+  public static GetXAttrsResponseProto convertXAttrsResponse(
+      List<XAttr> xAttrs) {
+    GetXAttrsResponseProto.Builder builder = GetXAttrsResponseProto
+        .newBuilder();
+    if (xAttrs != null) {
+      builder.addAllXAttrs(convertXAttrProto(xAttrs));
+    }
+    return builder.build();
+  }
+
+  public static ListXAttrsResponseProto convertListXAttrsResponse(
+    List<XAttr> names) {
+    ListXAttrsResponseProto.Builder builder =
+      ListXAttrsResponseProto.newBuilder();
+    if (names != null) {
+      builder.addAllXAttrs(convertXAttrProto(names));
+    }
+    return builder.build();
+  }
+
+  public static EncryptionZoneProto convert(EncryptionZone zone) {
+    return EncryptionZoneProto.newBuilder()
+        .setId(zone.getId())
+        .setPath(zone.getPath())
+        .setSuite(convert(zone.getSuite()))
+        .setCryptoProtocolVersion(convert(zone.getVersion()))
+        .setKeyName(zone.getKeyName())
+        .build();
+  }
+
+  public static SlotId convert(ShortCircuitShmSlotProto slotId) {
+    return new SlotId(convert(slotId.getShmId()),
+        slotId.getSlotIdx());
+  }
+
+  public static GetEditsFromTxidResponseProto convertEditsResponse(EventBatchList el) {
+    InotifyProtos.EventsListProto.Builder builder =
+        InotifyProtos.EventsListProto.newBuilder();
+    for (EventBatch b : el.getBatches()) {
+      List<InotifyProtos.EventProto> events = Lists.newArrayList();
+      for (Event e : b.getEvents()) {
+        switch (e.getEventType()) {
+          case CLOSE:
+            Event.CloseEvent ce = (Event.CloseEvent) e;
+            events.add(InotifyProtos.EventProto.newBuilder()
+                .setType(InotifyProtos.EventType.EVENT_CLOSE)
+                .setContents(
+                    InotifyProtos.CloseEventProto.newBuilder()
+                        .setPath(ce.getPath())
+                        .setFileSize(ce.getFileSize())
+                        .setTimestamp(ce.getTimestamp()).build().toByteString()
+                ).build());
+            break;
+          case CREATE:
+            Event.CreateEvent ce2 = (Event.CreateEvent) e;
+            events.add(InotifyProtos.EventProto.newBuilder()
+                .setType(InotifyProtos.EventType.EVENT_CREATE)
+                .setContents(
+                    InotifyProtos.CreateEventProto.newBuilder()
+                        .setType(createTypeConvert(ce2.getiNodeType()))
+                        .setPath(ce2.getPath())
+                        .setCtime(ce2.getCtime())
+                        .setOwnerName(ce2.getOwnerName())
+                        .setGroupName(ce2.getGroupName())
+                        .setPerms(convert(ce2.getPerms()))
+                        .setReplication(ce2.getReplication())
+                        .setSymlinkTarget(ce2.getSymlinkTarget() == null ?
+                            "" : ce2.getSymlinkTarget())
+                        .setDefaultBlockSize(ce2.getDefaultBlockSize())
+                        .setOverwrite(ce2.getOverwrite()).build().toByteString()
+                ).build());
+            break;
+          case METADATA:
+            Event.MetadataUpdateEvent me = (Event.MetadataUpdateEvent) e;
+            InotifyProtos.MetadataUpdateEventProto.Builder metaB =
+                InotifyProtos.MetadataUpdateEventProto.newBuilder()
+                    .setPath(me.getPath())
+                    .setType(metadataUpdateTypeConvert(me.getMetadataType()))
+                    .setMtime(me.getMtime())
+                    .setAtime(me.getAtime())
+                    .setReplication(me.getReplication())
+                    .setOwnerName(me.getOwnerName() == null ? "" :
+                        me.getOwnerName())
+                    .setGroupName(me.getGroupName() == null ? "" :
+                        me.getGroupName())
+                    .addAllAcls(me.getAcls() == null ?
+                        Lists.<AclEntryProto>newArrayList() :
+                        convertAclEntryProto(me.getAcls()))
+                    .addAllXAttrs(me.getxAttrs() == null ?
+                        Lists.<XAttrProto>newArrayList() :
+                        convertXAttrProto(me.getxAttrs()))
+                    .setXAttrsRemoved(me.isxAttrsRemoved());
+            if (me.getPerms() != null) {
+              metaB.setPerms(convert(me.getPerms()));
+            }
+            events.add(InotifyProtos.EventProto.newBuilder()
+                .setType(InotifyProtos.EventType.EVENT_METADATA)
+                .setContents(metaB.build().toByteString())
+                .build());
+            break;
+          case RENAME:
+            Event.RenameEvent re = (Event.RenameEvent) e;
+            events.add(InotifyProtos.EventProto.newBuilder()
+                .setType(InotifyProtos.EventType.EVENT_RENAME)
+                .setContents(
+                    InotifyProtos.RenameEventProto.newBuilder()
+                        .setSrcPath(re.getSrcPath())
+                        .setDestPath(re.getDstPath())
+                        .setTimestamp(re.getTimestamp()).build().toByteString()
+                ).build());
+            break;
+          case APPEND:
+            Event.AppendEvent re2 = (Event.AppendEvent) e;
+            events.add(InotifyProtos.EventProto.newBuilder()
+                .setType(InotifyProtos.EventType.EVENT_APPEND)
+                .setContents(InotifyProtos.AppendEventProto.newBuilder()
+                    .setPath(re2.getPath())
+                    .setNewBlock(re2.toNewBlock()).build().toByteString())
+                .build());
+            break;
+          case UNLINK:
+            Event.UnlinkEvent ue = (Event.UnlinkEvent) e;
+            events.add(InotifyProtos.EventProto.newBuilder()
+                .setType(InotifyProtos.EventType.EVENT_UNLINK)
+                .setContents(
+                    InotifyProtos.UnlinkEventProto.newBuilder()
+                        .setPath(ue.getPath())
+                        .setTimestamp(ue.getTimestamp()).build().toByteString()
+                ).build());
+            break;
+          case TRUNCATE:
+            Event.TruncateEvent te = (Event.TruncateEvent) e;
+            events.add(InotifyProtos.EventProto.newBuilder()
+                .setType(InotifyProtos.EventType.EVENT_TRUNCATE)
+                .setContents(
+                    InotifyProtos.TruncateEventProto.newBuilder()
+                        .setPath(te.getPath())
+                        .setFileSize(te.getFileSize())
+                        .setTimestamp(te.getTimestamp()).build().toByteString()
+                ).build());
+            break;
+          default:
+            throw new RuntimeException("Unexpected inotify event: " + e);
+        }
+      }
+      builder.addBatch(InotifyProtos.EventBatchProto.newBuilder().
+          setTxid(b.getTxid()).
+          addAllEvents(events));
+    }
+    builder.setFirstTxid(el.getFirstTxid());
+    builder.setLastTxid(el.getLastTxid());
+    builder.setSyncTxid(el.getSyncTxid());
+    return GetEditsFromTxidResponseProto.newBuilder().setEventsList(
+        builder.build()).build();
+  }
+
+  public static CryptoProtocolVersion[] convertCryptoProtocolVersions(
+      List<CryptoProtocolVersionProto> protos) {
+    List<CryptoProtocolVersion> versions =
+        Lists.newArrayListWithCapacity(protos.size());
+    for (CryptoProtocolVersionProto p: protos) {
+      versions.add(convert(p));
+    }
+    return versions.toArray(new CryptoProtocolVersion[]{});
+  }
+
+  public static HdfsProtos.PerFileEncryptionInfoProto convertPerFileEncInfo(
+      FileEncryptionInfo info) {
+    if (info == null) {
+      return null;
+    }
+    return HdfsProtos.PerFileEncryptionInfoProto.newBuilder()
+        .setKey(getByteString(info.getEncryptedDataEncryptionKey()))
+        .setIv(getByteString(info.getIV()))
+        .setEzKeyVersionName(info.getEzKeyVersionName())
+        .build();
+  }
+
+  public static HdfsProtos.ZoneEncryptionInfoProto convert(
+      CipherSuite suite, CryptoProtocolVersion version, String keyName) {
+    if (suite == null || version == null || keyName == null) {
+      return null;
+    }
+    return HdfsProtos.ZoneEncryptionInfoProto.newBuilder()
+        .setSuite(convert(suite))
+        .setCryptoProtocolVersion(convert(version))
+        .setKeyName(keyName)
+        .build();
+  }
+
+  public static FileEncryptionInfo convert(
+      HdfsProtos.PerFileEncryptionInfoProto fileProto,
+      CipherSuite suite, CryptoProtocolVersion version, String keyName) {
+    if (fileProto == null || suite == null || version == null ||
+        keyName == null) {
+      return null;
+    }
+    byte[] key = fileProto.getKey().toByteArray();
+    byte[] iv = fileProto.getIv().toByteArray();
+    String ezKeyVersionName = fileProto.getEzKeyVersionName();
+    return new FileEncryptionInfo(suite, version, key, iv, keyName,
+        ezKeyVersionName);
+  }
+
+  public static DatanodeInfo[] convert(DatanodeInfosProto datanodeInfosProto) {
+    List<DatanodeInfoProto> proto = datanodeInfosProto.getDatanodesList();
+    DatanodeInfo[] infos = new DatanodeInfo[proto.size()];
+    for (int i = 0; i < infos.length; i++) {
+      infos[i] = convert(proto.get(i));
+    }
+    return infos;
+  }
+
+  static List<DatanodeInfosProto> convert(DatanodeInfo[][] targets) {
+    DatanodeInfosProto[] ret = new DatanodeInfosProto[targets.length];
+    for (int i = 0; i < targets.length; i++) {
+      ret[i] = DatanodeInfosProto.newBuilder()
+          .addAllDatanodes(convert(targets[i])).build();
+    }
+    return Arrays.asList(ret);
+  }
+
+  public static ECSchema convertECSchema(HdfsProtos.ECSchemaProto schema) {
+    List<HdfsProtos.ECSchemaOptionEntryProto> optionsList = schema.getOptionsList();
+    Map<String, String> options = new HashMap<>(optionsList.size());
+    for (HdfsProtos.ECSchemaOptionEntryProto option : optionsList) {
+      options.put(option.getKey(), option.getValue());
+    }
+    return new ECSchema(schema.getCodecName(), schema.getDataUnits(),
+        schema.getParityUnits(), options);
+  }
+
+  public static HdfsProtos.ECSchemaProto convertECSchema(ECSchema schema) {
+    HdfsProtos.ECSchemaProto.Builder builder = HdfsProtos.ECSchemaProto.newBuilder()
+        .setCodecName(schema.getCodecName())
+        .setDataUnits(schema.getNumDataUnits())
+        .setParityUnits(schema.getNumParityUnits());
+    Set<Map.Entry<String, String>> entrySet = schema.getExtraOptions().entrySet();
+    for (Map.Entry<String, String> entry : entrySet) {
+      builder.addOptions(HdfsProtos.ECSchemaOptionEntryProto.newBuilder()
+          .setKey(entry.getKey()).setValue(entry.getValue()).build());
+    }
+    return builder.build();
+  }
+
+  public static ErasureCodingPolicy convertErasureCodingPolicy(
+      ErasureCodingPolicyProto policy) {
+    return new ErasureCodingPolicy(policy.getName(),
+        convertECSchema(policy.getSchema()),
+        policy.getCellSize());
+  }
+
+  public static ErasureCodingPolicyProto convertErasureCodingPolicy(
+      ErasureCodingPolicy policy) {
+    ErasureCodingPolicyProto.Builder builder = ErasureCodingPolicyProto
+        .newBuilder()
+        .setName(policy.getName())
+        .setSchema(convertECSchema(policy.getSchema()))
+        .setCellSize(policy.getCellSize());
+    return builder.build();
+  }
 }
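The conversion helpers above are symmetric between the Java model classes and their protobuf counterparts, so a value can be round-tripped through the wire format. A minimal sketch follows; the enclosing translator class is not visible in this excerpt, so the PBHelperClient name (taken from HDFS-9111 in CHANGES.txt below) is an assumption, while the four-argument ECSchema constructor is the same one convertECSchema itself calls:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
    import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
    import org.apache.hadoop.io.erasurecode.ECSchema;

    public class EcSchemaRoundTripSketch {
      public static void main(String[] args) {
        // Illustrative values only: a 6+3 schema with no extra codec options.
        Map<String, String> options = new HashMap<>();
        ECSchema schema = new ECSchema("rs-default", 6, 3, options);

        // PBHelperClient is assumed to be the class holding the converters shown above.
        HdfsProtos.ECSchemaProto proto = PBHelperClient.convertECSchema(schema);
        ECSchema restored = PBHelperClient.convertECSchema(proto);

        System.out.println(restored.getCodecName() + ": "
            + restored.getNumDataUnits() + " data + "
            + restored.getNumParityUnits() + " parity units");
      }
    }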

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java → hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/LongBitFormat.java


+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/ByteRangeInputStream.java

@@ -274,4 +274,15 @@ public abstract class ByteRangeInputStream extends FSInputStream {
     }
     status = StreamStatus.CLOSED;
   }
+
+  @Override
+  public synchronized int available() throws IOException{
+    getInputStream();
+    if(fileLength != null){
+      long remaining = fileLength - currentPos;
+      return remaining <= Integer.MAX_VALUE ? (int) remaining : Integer.MAX_VALUE;
+    }else {
+      return Integer.MAX_VALUE;
+    }
+  }
 }
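The available() override above (HDFS-8885 in CHANGES.txt below) reports the bytes remaining before EOF, capped at Integer.MAX_VALUE and falling back to Integer.MAX_VALUE when the length is not yet known, instead of the java.io.InputStream default of 0. A hedged usage sketch; the WebHDFS endpoint and file path are placeholders:

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsAvailableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Hypothetical WebHDFS endpoint and file.
        FileSystem fs = FileSystem.get(URI.create("webhdfs://namenode:50070"), conf);
        try (FSDataInputStream in = fs.open(new Path("/tmp/sample.txt"))) {
          // The stream is backed by ByteRangeInputStream, so this now reflects the
          // remaining length once the stream knows the file size.
          System.out.println("bytes remaining: " + in.available());
        }
      }
    }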

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java

@@ -241,7 +241,8 @@ class JsonUtilClient {
         getLong(m, "lastUpdateMonotonic", 0l),
         getInt(m, "xceiverCount", 0),
         getString(m, "networkLocation", ""),
-        DatanodeInfo.AdminStates.valueOf(getString(m, "adminState", "NORMAL")));
+        DatanodeInfo.AdminStates.valueOf(getString(m, "adminState", "NORMAL")),
+        getString(m, "upgradeDomain", ""));
   }
 
   /** Convert an Object[] to a DatanodeInfo[]. */

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -154,11 +154,11 @@ public class WebHdfsFileSystem extends FileSystem
         HdfsClientConfigKeys.DFS_WEBHDFS_OAUTH_ENABLED_DEFAULT);
 
     if(isOAuth) {
-      LOG.info("Enabling OAuth2 in WebHDFS");
+      LOG.debug("Enabling OAuth2 in WebHDFS");
       connectionFactory = URLConnectionFactory
           .newOAuth2URLConnectionFactory(conf);
     } else {
-      LOG.info("Not enabling OAuth2 in WebHDFS");
+      LOG.debug("Not enabling OAuth2 in WebHDFS");
       connectionFactory = URLConnectionFactory
           .newDefaultURLConnectionFactory(conf);
     }

+ 16 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto

@@ -162,6 +162,16 @@ message ListReconfigurablePropertiesResponseProto {
   repeated string name = 1;
 }
 
+message GetBalancerBandwidthRequestProto {
+}
+
+/**
+ * bandwidth - balancer bandwidth value of the datanode.
+ */
+message GetBalancerBandwidthResponseProto {
+  required uint64 bandwidth = 1;
+}
+
 /**
  * Protocol used from client to the Datanode.
  * See the request and response for details of rpc call.
@@ -211,4 +221,10 @@ service ClientDatanodeProtocolService {
 
   rpc triggerBlockReport(TriggerBlockReportRequestProto)
       returns(TriggerBlockReportResponseProto);
+
+  /**
+   * Returns the balancer bandwidth value of datanode.
+   */
+  rpc getBalancerBandwidth(GetBalancerBandwidthRequestProto)
+      returns(GetBalancerBandwidthResponseProto);
 }
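The new request/response pair above backs the getBalancerBandwidth RPC added by HDFS-7116 (listed in CHANGES.txt below). A small sketch of the generated protobuf API, assuming the conventional ClientDatanodeProtocolProtos outer class name for this .proto file:

    import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto;

    public class BalancerBandwidthProtoSketch {
      public static void main(String[] args) {
        long bandwidthBytesPerSec = 1048576L; // example value only
        // The datanode side fills in the single required field...
        GetBalancerBandwidthResponseProto response =
            GetBalancerBandwidthResponseProto.newBuilder()
                .setBandwidth(bandwidthBytesPerSec)
                .build();
        // ...and the client side reads it back.
        System.out.println("balancer bandwidth = " + response.getBandwidth());
      }
    }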

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto

@@ -98,6 +98,7 @@ message DatanodeInfoProto {
   optional uint64 cacheCapacity = 11 [default = 0];
   optional uint64 cacheUsed = 12 [default = 0];
   optional uint64 lastUpdateMonotonic = 13 [default = 0];
+  optional string upgradeDomain = 14;
 }
 
 /**

+ 17 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem

@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+org.apache.hadoop.hdfs.web.WebHdfsFileSystem
+org.apache.hadoop.hdfs.web.SWebHdfsFileSystem
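This services entry is what lets the JDK ServiceLoader mechanism, which FileSystem consults when it maps URI schemes to implementations, discover the WebHDFS classes now that they live in hadoop-hdfs-client (HDFS-9041 in CHANGES.txt below). A quick sketch that simply enumerates whatever the classpath registers:

    import java.util.ServiceLoader;

    import org.apache.hadoop.fs.FileSystem;

    public class FsServiceLoaderSketch {
      public static void main(String[] args) {
        // FileSystem resolves URI schemes through this same ServiceLoader lookup;
        // with hadoop-hdfs-client on the classpath the two WebHDFS classes appear here.
        for (FileSystem fs : ServiceLoader.load(FileSystem.class)) {
          System.out.println(fs.getClass().getName());
        }
      }
    }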

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java

@@ -27,10 +27,10 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.mount.MountEntry;
 import org.apache.hadoop.mount.MountInterface;
 import org.apache.hadoop.mount.MountResponse;
@@ -90,7 +90,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface {
     UserGroupInformation.setConfiguration(config);
     SecurityUtil.login(config, NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY,
         NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY);
-    this.dfsClient = new DFSClient(NameNode.getAddress(config), config);
+    this.dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config), config);
   }
 
   @Override

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java

@@ -33,8 +33,8 @@ import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.MultipleIOException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ShutdownHookManager;
@@ -173,7 +173,7 @@ class DFSClientCache {
         return ugi.doAs(new PrivilegedExceptionAction<DFSClient>() {
           @Override
           public DFSClient run() throws IOException {
-            return new DFSClient(NameNode.getAddress(config), config);
+            return new DFSClient(DFSUtilClient.getNNAddress(config), config);
           }
         });
       }
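The two NFS gateway hunks above (and the test change that follows) swap the server-side NameNode.getAddress() helper for the client-side DFSUtilClient.getNNAddress() introduced by HDFS-9022 (see CHANGES.txt below). A minimal sketch of the new call, with a placeholder fs.defaultFS value:

    import java.net.InetSocketAddress;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.DFSUtilClient;

    public class NnAddressSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://namenode:8020"); // hypothetical NameNode URI
        InetSocketAddress nnAddr = DFSUtilClient.getNNAddress(conf);
        // Same pattern the gateway code now uses to build its DFSClient.
        DFSClient client = new DFSClient(nnAddr, conf);
        client.close();
      }
    }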

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/nfs3/TestWrites.java

@@ -28,6 +28,7 @@ import java.util.Arrays;
 import java.util.concurrent.ConcurrentNavigableMap;
 
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;
@@ -35,7 +36,6 @@ import org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS;
 import org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.nfs.nfs3.FileHandle;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant;
 import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow;
@@ -480,7 +480,7 @@ public class TestWrites {
     try {
       cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
       cluster.waitActive();
-      client = new DFSClient(NameNode.getAddress(config), config);
+      client = new DFSClient(DFSUtilClient.getNNAddress(config), config);
 
       // Use emphral port in case tests are running in parallel
       config.setInt("nfs3.mountd.port", 0);
@@ -596,7 +596,8 @@ public class TestWrites {
       nfs3.startServiceInternal(false);
       nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
 
-      DFSClient dfsClient = new DFSClient(NameNode.getAddress(config), config);
+      DFSClient dfsClient = new DFSClient(DFSUtilClient.getNNAddress(config),
+          config);
       HdfsFileStatus status = dfsClient.getFileInfo("/");
       FileHandle rootHandle = new FileHandle(status.getFileId());
 
 

+ 276 - 133
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -36,6 +36,9 @@ Trunk (Unreleased)
 
 
     HDFS-8895. Remove deprecated BlockStorageLocation APIs. (wang)
     HDFS-8895. Remove deprecated BlockStorageLocation APIs. (wang)
 
 
+    HDFS-8981. Adding revision to data node jmx getVersion() method. (Siqi Li
+    via mingma)
+
   NEW FEATURES
   NEW FEATURES
 
 
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
     HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)
@@ -438,8 +441,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8085. Move CorruptFileBlockIterator to a new hdfs.client.impl package.
     HDFS-8085. Move CorruptFileBlockIterator to a new hdfs.client.impl package.
     (szetszwo)
     (szetszwo)
 
 
-    HDFS-8046. Allow better control of getContentSummary (kihwal)
-
     HDFS-8076. Code cleanup for DFSInputStream: use offset instead of
     HDFS-8076. Code cleanup for DFSInputStream: use offset instead of
     LocatedBlock when possible. (Zhe Zhang via wang)
     LocatedBlock when possible. (Zhe Zhang via wang)
 
 
@@ -453,9 +454,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
     HDFS-8101. DFSClient use of non-constant DFSConfigKeys pulls in WebHDFS
     classes at runtime. (Sean Busbey via atm)
     classes at runtime. (Sean Busbey via atm)
 
 
-    HDFS-8099. Change "DFSInputStream has been closed already" message to
-    debug log level (Charles Lamb via Colin P. McCabe)
-
     HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys.
     HDFS-8102. Separate webhdfs retry configuration keys from DFSConfigKeys.
     (wheat9)
     (wheat9)
 
 
@@ -730,9 +728,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8742. Inotify: Support event for OP_TRUNCATE.
     HDFS-8742. Inotify: Support event for OP_TRUNCATE.
     (Surendra Singh Lilhore via aajisaka)
     (Surendra Singh Lilhore via aajisaka)
 
 
-    HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
-    files rather than the entire DFSClient. (mingma)
-
     HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
     HDFS-8794. Improve CorruptReplicasMap#corruptReplicasMap. (yliu)
 
 
     HDFS-7483. Display information per tier on the Namenode UI.
     HDFS-7483. Display information per tier on the Namenode UI.
@@ -840,9 +835,6 @@ Release 2.8.0 - UNRELEASED
 
 
     HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
     HDFS-8900. Compact XAttrs to optimize memory footprint. (yliu)
 
 
-    HDFS-8846. Add a unit test for INotify functionality across a layout
-    version upgrade (Zhe Zhang via Colin P. McCabe)
-
     HDFS-8951. Move the shortcircuit package to hdfs-client.
     HDFS-8951. Move the shortcircuit package to hdfs-client.
     (Mingliang Liu via wheat9)
     (Mingliang Liu via wheat9)
 
 
@@ -874,6 +866,84 @@ Release 2.8.0 - UNRELEASED
 
 
     HDFS-8965. Harden edit log reading code against out of memory errors (cmccabe)
     HDFS-8965. Harden edit log reading code against out of memory errors (cmccabe)
 
 
+    HDFS-2070. Add more unit tests for FsShell getmerge (Daniel Templeton via
+    Colin P. McCabe)
+
+    HDFS-328. Improve fs -setrep error message for invalid replication factors.
+    (Daniel Templeton via wang)
+
+    HDFS-8890. Allow admin to specify which blockpools the balancer should run
+    on. (Chris Trezzo via mingma)
+
+    HDFS-9002. Move o.a.h.hdfs.net/*Peer classes to hdfs-client.
+    (Mingliang Liu via wheat9)
+
+    HDFS-9021. Use a yellow elephant rather than a blue one in diagram. (wang)
+
+    HDFS-9012. Move o.a.h.hdfs.protocol.datatransfer.PipelineAck class to
+    hadoop-hdfs-client module. (Mingliang Liu via wheat9)
+
+    HDFS-8984. Move replication queues related methods in FSNamesystem to
+    BlockManager. (wheat9)
+
+    HDFS-9019. Adding informative message to sticky bit permission denied
+    exception. (xyao)
+
+    HDFS-8860. Remove unused Replica copyOnWrite code (Lei (Eddy) Xu via Colin P. McCabe)
+
+    HDFS-8716. Introduce a new config specifically for safe mode block count
+    (Chang Li via kihwal)
+
+    HDFS-7116. Add a command to get the balancer bandwidth
+    (Rakesh R via vinayakumarb)
+
+    HDFS-8974. Convert docs in xdoc format to markdown.
+    (Masatake Iwasaki via aajisaka)
+
+    HDFS-6763. Initialize file system-wide quota once on transitioning to active
+    (kihwal)
+
+    HDFS-9027. Refactor o.a.h.hdfs.DataStreamer#isLazyPersist() method.
+    (Mingliang Liu via Arpit Agarwal)
+
+    HDFS-8996. Consolidate validateLog and scanLog in FJM#EditLogFile (Zhe
+    Zhang via Colin P. McCabe)
+
+    HDFS-9010. Replace NameNode.DEFAULT_PORT with HdfsClientConfigKeys.
+    DFS_NAMENODE_RPC_PORT_DEFAULT config key. (Mingliang Liu via wheat9)
+
+    HDFS-9065. Include commas on # of files, blocks, total filesystem objects
+    in NN Web UI. (Daniel Templeton via wheat9)
+
+    HDFS-9008. Balancer#Parameters class could use a builder pattern.
+    (Chris Trezzo via mingma)
+
+    HDFS-8953. DataNode Metrics logging (Kanaka Kumar Avvaru via vinayakumarb)
+
+    HDFS-9082. Change the log level in WebHdfsFileSystem.initialize() from INFO
+    to DEBUG. (Santhosh Nayak via cnauroth)
+
+    HDFS-7986. Allow files / directories to be deleted from the NameNode UI.
+    (Ravi Prakash via wheat9)
+
+    HDFS-7995. Implement chmod in the HDFS Web UI.
+    (Ravi Prakash and Haohui Mai via wheat9)
+
+    HDFS-9022. Move NameNode.getAddress() and NameNode.getUri() to
+    hadoop-hdfs-client. (Mingliang Liu via wheat9)
+
+    HDFS-5802. NameNode does not check for inode type before traversing down a
+    path. (Xiao Chen via Yongjun Zhang)
+
+    HDFS-9101. Remove deprecated NameNode.getUri() static helper method.
+    (Mingliang Liu via wheat9)
+
+    HDFS-9111. Move hdfs-client protobuf convert methods from PBHelper to
+    PBHelperClient. (Mingliang Liu via wheat9)
+
+    HADOOP-12428. Fix inconsistency between log-level guards and statements.
+    (Jagadesh Kiran N and Jackie Chang via ozawa)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -890,6 +960,13 @@ Release 2.8.0 - UNRELEASED
 
 
     HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)
     HDFS-8862. BlockManager#excessReplicateMap should use a HashMap. (yliu)
 
 
+    HDFS-8929. Add a metric to expose the timestamp of the last journal
+    (surendra singh lilhore via vinayakumarb)
+
+    HDFS-8829. Make SO_RCVBUF and SO_SNDBUF size configurable for
+    DataTransferProtocol sockets and allow configuring auto-tuning (He Tianyi
+    via Colin P. McCabe)
+
   BUG FIXES
   BUG FIXES
 
 
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
     HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.
@@ -995,9 +1072,6 @@ Release 2.8.0 - UNRELEASED
     HDFS-8290. WebHDFS calls before namesystem initialization can cause
     HDFS-8290. WebHDFS calls before namesystem initialization can cause
     NullPointerException. (cnauroth)
     NullPointerException. (cnauroth)
 
 
-    HDFS-8219. setStoragePolicy with folder behavior is different after cluster restart.
-    (surendra singh lilhore via Xiaoyu Yao)
-
     HDFS-8310. Fix TestCLI.testAll "help: help for find" on Windows.
     HDFS-8310. Fix TestCLI.testAll "help: help for find" on Windows.
     (Kiran Kumar M R via Xiaoyu Yao)
     (Kiran Kumar M R via Xiaoyu Yao)
 
 
@@ -1104,18 +1178,12 @@ Release 2.8.0 - UNRELEASED
     HDFS-8268. Port conflict log for data node server is not sufficient
     HDFS-8268. Port conflict log for data node server is not sufficient
     (Mohammad Shahid Khan via vinayakumarb)
     (Mohammad Shahid Khan via vinayakumarb)
 
 
-    HDFS-8431. hdfs crypto class not found in Windows.
-    (Anu Engineer via cnauroth)
-
     HDFS-8407. hdfsListDirectory must set errno to 0 on success (Masatake
     HDFS-8407. hdfsListDirectory must set errno to 0 on success (Masatake
     Iwasaki via Colin P. McCabe)
     Iwasaki via Colin P. McCabe)
 
 
     HDFS-7401. Add block info to DFSInputStream' WARN message when it adds
     HDFS-7401. Add block info to DFSInputStream' WARN message when it adds
     node to deadNodes (Arshad Mohammad via vinayakumarb)
     node to deadNodes (Arshad Mohammad via vinayakumarb)
 
 
-    HDFS-7609. Avoid retry cache collision when Standby NameNode loading edits.
-    (Ming Ma via jing9)
-
     HDFS-8490. Typo in trace enabled log in ExceptionHandler of WebHDFS.
     HDFS-8490. Typo in trace enabled log in ExceptionHandler of WebHDFS.
     (Archana T via ozawa)
     (Archana T via ozawa)
 
 
@@ -1270,6 +1338,46 @@ Release 2.8.0 - UNRELEASED
     HDFS-8950. NameNode refresh doesn't remove DataNodes that are no longer in
     HDFS-8950. NameNode refresh doesn't remove DataNodes that are no longer in
     the allowed list (Daniel Templeton)
     the allowed list (Daniel Templeton)
 
 
+    HDFS-8388. Time and Date format need to be in sync in NameNode UI page.
+    (Surendra Singh Lilhore via aajisaka)
+
+    HDFS-9003. ForkJoin thread pool leaks. (Kihwal Lee via jing9)
+
+    HDFS-8885. ByteRangeInputStream used in webhdfs does not override
+    available(). (Shradha Revankar via aajisaka)
+
+    HDFS-9009. Send metrics logs to NullAppender by default. (Arpit Agarwal)
+
+    HDFS-8964. When validating the edit log, do not read at or beyond the file
+    offset that is being written (Zhe Zhang via Colin P. McCabe)
+
+    HDFS-8939. Test(S)WebHdfsFileContextMainOperations failing on branch-2.
+    (Chris Nauroth via jghoman)
+
+    HDFS-8581. ContentSummary on / skips further counts on yielding lock
+    (J.Andreina via vinayakumarb)
+
+    HDFS-9036. In BlockPlacementPolicyWithNodeGroup#chooseLocalStorage , random
+    node is selected eventhough fallbackToLocalRack is true.
+    (J.Andreina via vinayakumarb)
+
+    HDFS-9041. Move entries in META-INF/services/o.a.h.fs.FileSystem to
+    hdfs-client. (Mingliang Liu via wheat9)
+
+    HDFS-9069. TestNameNodeMetricsLogger failing -port in use.
+    (stevel)
+
+    HDFS-9067. o.a.h.hdfs.server.datanode.fsdataset.impl.TestLazyWriter
+    is failing in trunk (Surendra Singh Lilhore via vinayakumarb)
+
+    HDFS-9072. Fix random failures in TestJMXGet.
+    (J.Andreina via stevel)
+
+    HDFS-9073. Fix failures in TestLazyPersistLockedMemory
+    testReleaseOnEviction(). (J.Andreina via stevel)
+
+    HDFS-9063. Correctly handle snapshot path for getContentSummary. (jing9)
+
 Release 2.7.2 - UNRELEASED
 Release 2.7.2 - UNRELEASED
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
@@ -1280,6 +1388,12 @@ Release 2.7.2 - UNRELEASED
 
 
     HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
     HDFS-8659. Block scanner INFO message is spamming logs. (Yongjun Zhang)
 
 
+    HADOOP-5323. Trash documentation should describe its directory structure and
+    configurations. (Weiwei Yang via ozawa)
+
+    HDFS-8099. Change "DFSInputStream has been closed already" message to
+    debug log level (Charles Lamb via Colin P. McCabe)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
     HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
     HDFS-8722. Optimize datanode writes for small writes and flushes (kihwal)
@@ -1297,11 +1411,17 @@ Release 2.7.2 - UNRELEASED
 
 
     HDFS-8867. Enable optimized block reports. (Daryn Sharp via jing9)
     HDFS-8867. Enable optimized block reports. (Daryn Sharp via jing9)
 
 
-    HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is
-    flawed. (Kihwal Lee via yliu)
-
     HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
     HDFS-8891. HDFS concat should keep srcs order. (Yong Zhang via jing9)
 
 
+    HDFS-8995. Flaw in registration bookeeping can make DN die on reconnect.
+    (Kihwal Lee via yliu)
+
+    HDFS-9033. dfsadmin -metasave prints "NaN" for cache used%.
+    (Brahma Reddy Battula via aajisaka)
+
+    HDFS-9042. Update document for the Storage policy name
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.1 - 2015-07-06
 Release 2.7.1 - 2015-07-06
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
@@ -1338,15 +1458,9 @@ Release 2.7.1 - 2015-07-06
     (Surendra Singh Lilhore via szetszwo)
     (Surendra Singh Lilhore via szetszwo)
 
 
   OPTIMIZATIONS
   OPTIMIZATIONS
-    HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using
-    hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang
-    via Colin P. McCabe)
 
 
   BUG FIXES
   BUG FIXES
 
 
-    HDFS-8127. NameNode Failover during HA upgrade can cause DataNode to
-    finalize upgrade. (jing9)
-
     HDFS-8151. Always use snapshot path as source when invalid snapshot names
     HDFS-8151. Always use snapshot path as source when invalid snapshot names
     are used for diff based distcp. (jing9)
     are used for diff based distcp. (jing9)
 
 
@@ -1368,9 +1482,6 @@ Release 2.7.1 - 2015-07-06
     HDFS-8147. StorageGroup in Dispatcher should override equals nad hashCode.
     HDFS-8147. StorageGroup in Dispatcher should override equals nad hashCode.
     (surendra singh lilhore via szetszwo)
     (surendra singh lilhore via szetszwo)
 
 
-    HDFS-8070. Pre-HDFS-7915 DFSClient cannot use short circuit on
-    post-HDFS-7915 DataNode (cmccabe)
-
     HDFS-8273. FSNamesystem#Delete() should not call logSync() when holding the
     HDFS-8273. FSNamesystem#Delete() should not call logSync() when holding the
     lock. (wheat9)
     lock. (wheat9)
 
 
@@ -1383,17 +1494,8 @@ Release 2.7.1 - 2015-07-06
     HDFS-8305: HDFS INotify: the destination field of RenameOp should always
     HDFS-8305: HDFS INotify: the destination field of RenameOp should always
     end with the file name (cmccabe)
     end with the file name (cmccabe)
 
 
-    HDFS-7980. Incremental BlockReport will dramatically slow down namenode
-    startup.  (Walter Su via szetszwo)
-
     HDFS-8226. Non-HA rollback compatibility broken (J.Andreina via vinayakumarb)
     HDFS-8226. Non-HA rollback compatibility broken (J.Andreina via vinayakumarb)
 
 
-    HDFS-7894. Rolling upgrade readiness is not updated in jmx until query
-    command is issued. (Brahma Reddy Battula  via kihwal)
-
-    HDFS-8254. Standby namenode doesn't process DELETED_BLOCK if the add block
-    request is in edit log. (Rushabh S Shah via kihwal)
-
     HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
     HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
     goes for infinite loop (Rushabh S Shah  via kihwal)
     goes for infinite loop (Rushabh S Shah  via kihwal)
 
 
@@ -1402,9 +1504,6 @@ Release 2.7.1 - 2015-07-06
 
 
     HDFS-8405. Fix a typo in NamenodeFsck.  (Takanobu Asanuma via szetszwo)
     HDFS-8405. Fix a typo in NamenodeFsck.  (Takanobu Asanuma via szetszwo)
 
 
-    HDFS-8404. Pending block replication can get stuck using older genstamp
-    (Nathan Roberts via kihwal)
-
     HDFS-8451. DFSClient probe for encryption testing interprets empty URI
     HDFS-8451. DFSClient probe for encryption testing interprets empty URI
     property for "enabled". (Steve Loughran via xyao)
     property for "enabled". (Steve Loughran via xyao)
 
 
@@ -1453,9 +1552,6 @@ Release 2.7.0 - 2015-04-20
 
 
   NEW FEATURES
   NEW FEATURES
     
     
-    HDFS-7278. Add a command that allows sysadmins to manually trigger full
-    block reports from a DN (cmccabe)
-
     HDFS-6663. Admin command to track file and locations from block id.
     HDFS-6663. Admin command to track file and locations from block id.
     (Chen He via kihwal)
     (Chen He via kihwal)
 
 
@@ -1549,9 +1645,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-3342. SocketTimeoutException in BlockSender.sendChunks could
     HDFS-3342. SocketTimeoutException in BlockSender.sendChunks could
     have a better error message. (Yongjun Zhang via wang)
     have a better error message. (Yongjun Zhang via wang)
 
 
-    HDFS-7035. Make adding a new data directory to the DataNode an atomic
-    operation and improve error handling (Lei Xu via Colin P. McCabe)
-    
     HDFS-6917. Add an hdfs debug command to validate blocks, call recoverlease,
     HDFS-6917. Add an hdfs debug command to validate blocks, call recoverlease,
     etc. (cmccabe)
     etc. (cmccabe)
 
 
@@ -1637,9 +1730,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7462. Consolidate implementation of mkdirs() into a single class.
     HDFS-7462. Consolidate implementation of mkdirs() into a single class.
     (wheat9)
     (wheat9)
 
 
-    HDFS-7446. HDFS inotify should have the ability to determine what txid it
-    has read up to (cmccabe)
-
     HDFS-6735. A minor optimization to avoid pread() be blocked by read()
     HDFS-6735. A minor optimization to avoid pread() be blocked by read()
     inside the same DFSInputStream (Lars Hofhansl via stack)
     inside the same DFSInputStream (Lars Hofhansl via stack)
     
     
@@ -1682,9 +1772,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7528. Consolidate symlink-related implementation into a single class.
     HDFS-7528. Consolidate symlink-related implementation into a single class.
     (wheat9)
     (wheat9)
 
 
-    HDFS-7531. Improve the concurrent access on FsVolumeList (Lei Xu via Colin
-    P. McCabe)
-
     HDFS-7373. Clean up temporary files after fsimage transfer failures.
     HDFS-7373. Clean up temporary files after fsimage transfer failures.
     (kihwal)
     (kihwal)
 
 
@@ -1702,8 +1789,6 @@ Release 2.7.0 - 2015-04-20
     HADOOP-11470. Remove some uses of obsolete guava APIs from the hadoop
     HADOOP-11470. Remove some uses of obsolete guava APIs from the hadoop
     codebase. (Sangjin Lee via Colin P. McCabe)
     codebase. (Sangjin Lee via Colin P. McCabe)
 
 
-    HDFS-7182. JMX metrics aren't accessible when NN is busy. (Ming Ma via jing9)
-
     HDFS-7323. Move the get/setStoragePolicy commands out from dfsadmin.
     HDFS-7323. Move the get/setStoragePolicy commands out from dfsadmin.
     (jing9 via yliu)
     (jing9 via yliu)
 
 
@@ -1897,9 +1982,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7410. Support CreateFlags with append() to support hsync() for
     HDFS-7410. Support CreateFlags with append() to support hsync() for
     appending streams (Vinayakumar B via Colin P. McCabe)
     appending streams (Vinayakumar B via Colin P. McCabe)
 
 
-    HDFS-7742. Favoring decommissioning node for replication can cause a block 
-    to stay underreplicated for long periods (Nathan Roberts via kihwal)
-
     HDFS-8008. Support client-side back off when the datanodes are congested.
     HDFS-8008. Support client-side back off when the datanodes are congested.
     (wheat9)
     (wheat9)
 
 
@@ -2047,9 +2129,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7431. log message for InvalidMagicNumberException may be incorrect.
     HDFS-7431. log message for InvalidMagicNumberException may be incorrect.
     (Yi Liu via cnauroth)
     (Yi Liu via cnauroth)
 
 
-    HDFS-7552. Change FsVolumeList toString() to fix
-    TestDataNodeVolumeFailureToleration (Liang Xie via Colin P. McCabe)
-
     HDFS-7557. Fix spacing for a few keys in DFSConfigKeys.java 
     HDFS-7557. Fix spacing for a few keys in DFSConfigKeys.java 
     (Colin P.McCabe)
     (Colin P.McCabe)
 
 
@@ -2076,21 +2155,9 @@ Release 2.7.0 - 2015-04-20
     HDFS-7589. Break the dependency between libnative_mini_dfs and libhdfs.
     HDFS-7589. Break the dependency between libnative_mini_dfs and libhdfs.
     (Zhanwei Wang via cnauroth)
     (Zhanwei Wang via cnauroth)
 
 
-    HDFS-7579. Improve log reporting during block report rpc failure.
-    (Charles Lamb via cnauroth)
-
-    HDFS-7596. NameNode should prune dead storages from storageMap.
-    (Arpit Agarwal via cnauroth)
-
-    HDFS-7533. Datanode sometimes does not shutdown on receiving upgrade
-    shutdown command (Eric Payne via kihwal)
-
     HDFS-5445. PacketReceiver populates the packetLen field in PacketHeader
     HDFS-5445. PacketReceiver populates the packetLen field in PacketHeader
     incorrectly (Jonathan Mace via Colin P. McCabe)
     incorrectly (Jonathan Mace via Colin P. McCabe)
 
 
-    HDFS-7470. SecondaryNameNode need twice memory when calling
-    reloadFromImageFile. (zhaoyunjiong via cnauroth)
-
     HDFS-7585. Get TestEnhancedByteBufferAccess working on CPU architectures
     HDFS-7585. Get TestEnhancedByteBufferAccess working on CPU architectures
     with page sizes other than 4096 (Sam Liu via Colin P. McCabe)
     with page sizes other than 4096 (Sam Liu via Colin P. McCabe)
 
 
@@ -2108,15 +2175,9 @@ Release 2.7.0 - 2015-04-20
     HDFS-7496. Fix FsVolume removal race conditions on the DataNode by
     HDFS-7496. Fix FsVolume removal race conditions on the DataNode by
     reference-counting the volume instances (lei via cmccabe)
     reference-counting the volume instances (lei via cmccabe)
 
 
-    HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
-    Colin P. McCabe)
-
     HDFS-7548. Corrupt block reporting delayed until datablock scanner thread
     HDFS-7548. Corrupt block reporting delayed until datablock scanner thread
     detects it (Rushabh Shah via kihwal)
     detects it (Rushabh Shah via kihwal)
 
 
-    HDFS-7575. Upgrade should generate a unique storage ID for each
-    volume. (Arpit Agarwal)
-
     HDFS-3519. Checkpoint upload may interfere with a concurrent saveNamespace.
     HDFS-3519. Checkpoint upload may interfere with a concurrent saveNamespace.
     (Ming Ma via cnauroth)
     (Ming Ma via cnauroth)
 
 
@@ -2161,9 +2222,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-6651. Deletion failure can leak inodes permanently.
     HDFS-6651. Deletion failure can leak inodes permanently.
     (Jing Zhao via wheat9)
     (Jing Zhao via wheat9)
 
 
-    HDFS-7707. Edit log corruption due to delayed block removal again.
-    (Yongjun Zhang via kihwal)
-
     HDFS-7734. Class cast exception in NameNode#main. (yliu via wang)
     HDFS-7734. Class cast exception in NameNode#main. (yliu via wang)
 
 
     HDFS-7719. BlockPoolSliceStorage#removeVolumes fails to remove some
     HDFS-7719. BlockPoolSliceStorage#removeVolumes fails to remove some
@@ -2190,9 +2248,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7718. Store KeyProvider in ClientContext to avoid leaking key provider
     HDFS-7718. Store KeyProvider in ClientContext to avoid leaking key provider
     threads when using FileContext (Arun Suresh via Colin P. McCabe)
     threads when using FileContext (Arun Suresh via Colin P. McCabe)
 
 
-    HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause
-    DataNode to register successfully with only one NameNode.(vinayakumarb)
-
     HDFS-7769. TestHDFSCLI should not create files in hdfs project root dir.
     HDFS-7769. TestHDFSCLI should not create files in hdfs project root dir.
     (szetszwo)
     (szetszwo)
 
 
@@ -2226,15 +2281,9 @@ Release 2.7.0 - 2015-04-20
     HDFS-6662. WebHDFS cannot open a file if its path contains "%".
     HDFS-6662. WebHDFS cannot open a file if its path contains "%".
     (Gerson Carlos via wheat9)
     (Gerson Carlos via wheat9)
 
 
-    HDFS-7788. Post-2.6 namenode may not start up with an image containing
-    inodes created with an old release. (Rushabh Shah via kihwal)
-
     HDFS-7814. Fix usage string of storageType parameter for
     HDFS-7814. Fix usage string of storageType parameter for
     "dfsadmin -setSpaceQuota/clrSpaceQuota". (Xiaoyu Yao via cnauroth)
     "dfsadmin -setSpaceQuota/clrSpaceQuota". (Xiaoyu Yao via cnauroth)
 
 
-    HDFS-7009. Active NN and standby NN have different live nodes.
-    (Ming Ma via cnauroth)
-
     HDFS-7807. libhdfs htable.c: fix htable resizing, add unit test (cmccabe)
     HDFS-7807. libhdfs htable.c: fix htable resizing, add unit test (cmccabe)
 
 
     HDFS-7805. NameNode recovery prompt should be printed on console (Surendra
     HDFS-7805. NameNode recovery prompt should be printed on console (Surendra
@@ -2246,9 +2295,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7831. Fix the starting index and end condition of the loop in
     HDFS-7831. Fix the starting index and end condition of the loop in
     FileDiffList.findEarlierSnapshotBlocks(). (Konstantin Shvachko via jing9)
     FileDiffList.findEarlierSnapshotBlocks(). (Konstantin Shvachko via jing9)
 
 
-    HDFS-7763. fix zkfc hung issue due to not catching exception in a corner
-    case. (Liang Xie via wang)
-
     HDFS-7843. A truncated file is corrupted after rollback from a rolling
     HDFS-7843. A truncated file is corrupted after rollback from a rolling
     upgrade.  (szetszwo)
     upgrade.  (szetszwo)
 
 
@@ -2261,9 +2307,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7785. Improve diagnostics information for HttpPutFailedException.
     HDFS-7785. Improve diagnostics information for HttpPutFailedException.
     (Chengbing Liu via wheat9)
     (Chengbing Liu via wheat9)
 
 
-    HDFS-7871. NameNodeEditLogRoller can keep printing "Swallowing exception"
-    message. (jing9)
-
     HDFS-7757. Misleading error messages in FSImage.java. (Brahma Reddy Battula
     HDFS-7757. Misleading error messages in FSImage.java. (Brahma Reddy Battula
     via Arpit Agarwal)
     via Arpit Agarwal)
 
 
@@ -2281,15 +2324,9 @@ Release 2.7.0 - 2015-04-20
 
 
     HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
     HDFS-7434. DatanodeID hashCode should not be mutable. (daryn via kihwal)
 
 
-    HDFS-7885. Datanode should not trust the generation stamp provided by
-    client. (Tsz Wo Nicholas Sze via jing9)
-
     HDFS-7818. OffsetParam should return the default value instead of throwing
     HDFS-7818. OffsetParam should return the default value instead of throwing
     NPE when the value is unspecified. (Eric Payne via wheat9)
     NPE when the value is unspecified. (Eric Payne via wheat9)
 
 
-    HDFS-7830. DataNode does not release the volume lock when adding a volume
-    fails. (Lei Xu via Colin P. Mccabe)
-
     HDFS-6833.  DirectoryScanner should not register a deleting block with
     HDFS-6833.  DirectoryScanner should not register a deleting block with
     memory of DataNode.  (Shinichi Yamashita via szetszwo)
     memory of DataNode.  (Shinichi Yamashita via szetszwo)
 
 
@@ -2299,9 +2336,6 @@ Release 2.7.0 - 2015-04-20
     HDFS-7903. Cannot recover block after truncate and delete snapshot.
     HDFS-7903. Cannot recover block after truncate and delete snapshot.
     (Plamen Jeliazkov via shv)
     (Plamen Jeliazkov via shv)
 
 
-    HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and
-    fail to tell the DFSClient about it because of a network error (cmccabe)
-
     HDFS-7886. Fix TestFileTruncate falures. (Plamen Jeliazkov and shv)
     HDFS-7886. Fix TestFileTruncate falures. (Plamen Jeliazkov and shv)
 
 
     HDFS-7946. TestDataNodeVolumeFailureReporting NPE on Windows. (Xiaoyu Yao
     HDFS-7946. TestDataNodeVolumeFailureReporting NPE on Windows. (Xiaoyu Yao
@@ -2335,18 +2369,10 @@ Release 2.7.0 - 2015-04-20
     HDFS-7943. Append cannot handle the last block with length greater than
     HDFS-7943. Append cannot handle the last block with length greater than
     the preferred block size. (jing9)
     the preferred block size. (jing9)
 
 
-    HDFS-7929. inotify unable fetch pre-upgrade edit log segments once upgrade
-    starts (Zhe Zhang via Colin P. McCabe)
-
-    HDFS-7587. Edit log corruption can happen if append fails with a quota
-    violation. (jing9)
-
     HDFS-7816. Unable to open webhdfs paths with "+". (wheat9 via kihwal)
     HDFS-7816. Unable to open webhdfs paths with "+". (wheat9 via kihwal)
 
 
     HDFS-7932. Speed up the shutdown of datanode during rolling upgrade.(kihwal)
     HDFS-7932. Speed up the shutdown of datanode during rolling upgrade.(kihwal)
 
 
-    HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)
-
     HDFS-7957. Truncate should verify quota before making changes. (jing9)
     HDFS-7957. Truncate should verify quota before making changes. (jing9)
 
 
     HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now()
     HDFS-6841. Use Time.monotonicNow() wherever applicable instead of Time.now()
@@ -2354,13 +2380,6 @@ Release 2.7.0 - 2015-04-20
 
 
     HDFS-7942. NFS: support regexp grouping in nfs.exports.allowed.hosts (brandonli)
     HDFS-7942. NFS: support regexp grouping in nfs.exports.allowed.hosts (brandonli)
 
 
-    HDFS-7884. Fix NullPointerException in BlockSender when the generation stamp
-    provided by the client is larger than the one stored in the datanode.
-    (Brahma Reddy Battula via szetszwo)
-
-    HDFS-7960. The full block report should prune zombie storages even if
-    they're not empty. (cmccabe and Eddy Xu via wang)
-
     HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via wang)
     HDFS-7961. Trigger full block report after hot swapping disk. (Eddy Xu via wang)
 
 
     HDFS-7977. NFS couldn't take percentile intervals (brandonli)
     HDFS-7977. NFS couldn't take percentile intervals (brandonli)
@@ -2393,15 +2412,9 @@ Release 2.7.0 - 2015-04-20
     HDFS-8051. FsVolumeList#addVolume should release volume reference if not
     HDFS-8051. FsVolumeList#addVolume should release volume reference if not
     put it into BlockScanner. (Lei (Eddy) Xu via Colin P. McCabe)
     put it into BlockScanner. (Lei (Eddy) Xu via Colin P. McCabe)
 
 
-    HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl
-    lock for a very long time (sinago via cmccabe)
-
     HDFS-8038. PBImageDelimitedTextWriter#getEntry outputs HDFS path in
     HDFS-8038. PBImageDelimitedTextWriter#getEntry outputs HDFS path in
     platform-specific format. (Xiaoyu Yao via cnauroth)
     platform-specific format. (Xiaoyu Yao via cnauroth)
 
 
-    HDFS-8072. Reserved RBW space is not released if client terminates while
-    writing block. (Arpit Agarwal)
-
     HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki
     HDFS-8063: Fix intermittent test failures in TestTracing (Masatake Iwasaki
     via Colin P. McCabe)
     via Colin P. McCabe)
 
 
@@ -2443,16 +2456,42 @@ Release 2.6.2 - UNRELEASED
 
 
   BUG FIXES
   BUG FIXES
 
 
-Release 2.6.1 - UNRELEASED
+Release 2.6.1 - 2015-09-09
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES
 
 
   NEW FEATURES
   NEW FEATURES
 
 
+    HDFS-7278. Add a command that allows sysadmins to manually trigger full
+    block reports from a DN (cmccabe)
+
   IMPROVEMENTS
   IMPROVEMENTS
 
 
+    HDFS-7035. Make adding a new data directory to the DataNode an atomic
+    operation and improve error handling (Lei Xu via Colin P. McCabe)
+
+    HDFS-7531. Improve the concurrent access on FsVolumeList (Lei Xu via Colin
+    P. McCabe)
+
+    HDFS-7579. Improve log reporting during block report rpc failure.
+    (Charles Lamb via cnauroth)
+
+    HDFS-7182. JMX metrics aren't accessible when NN is busy. (Ming Ma via jing9)
+
+    HDFS-7596. NameNode should prune dead storages from storageMap.
+    (Arpit Agarwal via cnauroth)
+
+    HDFS-8046. Allow better control of getContentSummary (kihwal)
+
+    HDFS-8384. Allow NN to start up if there are files that have a lease but
+    are not under construction. (jing9)
+
   OPTIMIZATIONS
   OPTIMIZATIONS
 
 
+    HDFS-8480. Fix performance and timeout issues in HDFS-7929 by using
+    hard-links to preserve old edit logs, instead of copying them. (Zhe Zhang
+    via Colin P. McCabe)
+
   BUG FIXES
   BUG FIXES
 
 
     HDFS-7425. NameNode block deletion logging uses incorrect appender.
     HDFS-7425. NameNode block deletion logging uses incorrect appender.
@@ -2476,12 +2515,14 @@ Release 2.6.1 - UNRELEASED
     HDFS-7733. NFS: readdir/readdirplus return null directory
     HDFS-7733. NFS: readdir/readdirplus return null directory
     attribute on failure. (Arpit Agarwal)
     attribute on failure. (Arpit Agarwal)
 
 
-    HDFS-8486. DN startup may cause severe data loss (Daryn Sharp via Colin P.
-    McCabe)
+    HDFS-8486. DN startup may cause severe data loss. (daryn via cmccabe)
 
 
     HDFS-7213. processIncrementalBlockReport performance degradation.
     HDFS-7213. processIncrementalBlockReport performance degradation.
     (Eric Payne via kihwal)
     (Eric Payne via kihwal)
 
 
+    HDFS-7314. When the DFSClient lease cannot be renewed, abort open-for-write
+    files rather than the entire DFSClient. (mingma)
+
     HDFS-7235. DataNode#transferBlock should report blocks that don't exist
     HDFS-7235. DataNode#transferBlock should report blocks that don't exist
     using reportBadBlock (yzhang via cmccabe)
     using reportBadBlock (yzhang via cmccabe)
 
 
@@ -2491,9 +2532,111 @@ Release 2.6.1 - UNRELEASED
     HDFS-7225. Remove stale block invalidation work when DN re-registers with
     HDFS-7225. Remove stale block invalidation work when DN re-registers with
     different UUID. (Zhe Zhang and Andrew Wang)
     different UUID. (Zhe Zhang and Andrew Wang)
 
 
+    HDFS-7533. Datanode sometimes does not shut down on receiving upgrade
+    shutdown command (Eric Payne via kihwal)
+
+    HDFS-7575. Upgrade should generate a unique storage ID for each
+    volume. (Arpit Agarwal)
+
+    HDFS-7707. Edit log corruption due to delayed block removal again.
+    (Yongjun Zhang via kihwal)
+
+    HDFS-7714. Simultaneous restart of HA NameNodes and DataNode can cause
+    DataNode to register successfully with only one NameNode. (vinayakumarb)
+
+    HDFS-7788. Post-2.6 namenode may not start up with an image containing
+    inodes created with an old release. (Rushabh Shah via kihwal)
+
+    HDFS-7009. Active NN and standby NN have different live nodes.
+    (Ming Ma via cnauroth)
+
+    HDFS-7763. Fix ZKFC hang issue due to not catching exception in a corner
+    case. (Liang Xie via wang)
+
+    HDFS-7871. NameNodeEditLogRoller can keep printing "Swallowing exception"
+    message. (jing9)
+
+    HDFS-7885. Datanode should not trust the generation stamp provided by
+    client. (Tsz Wo Nicholas Sze via jing9)
+
+    HDFS-7610. Fix removal of dynamically added DN volumes (Lei (Eddy) Xu via
+    Colin P. McCabe)
+
+    HDFS-7830. DataNode does not release the volume lock when adding a volume
+    fails. (Lei Xu via Colin P. Mccabe)
+
+    HDFS-7587. Edit log corruption can happen if append fails with a quota
+    violation. (jing9)
+
+    HDFS-7929. inotify unable to fetch pre-upgrade edit log segments once
+    upgrade starts (Zhe Zhang via Colin P. McCabe)
+
+    HDFS-7930. commitBlockSynchronization() does not remove locations. (yliu)
+
+    HDFS-7884. Fix NullPointerException in BlockSender when the generation stamp
+    provided by the client is larger than the one stored in the datanode.
+    (Brahma Reddy Battula via szetszwo)
+
+    HDFS-7960. The full block report should prune zombie storages even if
+    they're not empty. (cmccabe and Eddy Xu via wang)
+
+    HDFS-7742. Favoring decommissioning node for replication can cause a block
+    to stay underreplicated for long periods (Nathan Roberts via kihwal)
+
+    HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl
+    lock for a very long time (sinago via cmccabe)
+
+    HDFS-8072. Reserved RBW space is not released if client terminates while
+    writing block. (Arpit Agarwal)
+
+    HDFS-8127. NameNode Failover during HA upgrade can cause DataNode to
+    finalize upgrade. (jing9)
+
+    HDFS-7915. The DataNode can sometimes allocate a ShortCircuitShm slot and
+    fail to tell the DFSClient about it because of a network error (cmccabe)
+
+    HDFS-8070. Pre-HDFS-7915 DFSClient cannot use short circuit on
+    post-HDFS-7915 DataNode (cmccabe)
+
+    HDFS-8219. setStoragePolicy with folder behavior is different after cluster restart.
+    (surendra singh lilhore via Xiaoyu Yao)
+
+    HDFS-7894. Rolling upgrade readiness is not updated in jmx until query
+    command is issued. (Brahma Reddy Battula via kihwal)
+
+    HDFS-8254. Standby namenode doesn't process DELETED_BLOCK if the add block
+    request is in edit log. (Rushabh S Shah via kihwal)
+
+    HDFS-8404. Pending block replication can get stuck using older genstamp
+    (Nathan Roberts via kihwal)
+
+    HDFS-8431. hdfs crypto class not found in Windows.
+    (Anu Engineer via cnauroth)
+
+    HDFS-7609. Avoid retry cache collision when Standby NameNode loading edits.
+    (Ming Ma via jing9)
+
     HDFS-8270. create() always retried with hardcoded timeout when file already
     HDFS-8270. create() always retried with hardcoded timeout when file already
     exists with open lease (J.Andreina via vinayakumarb)
     exists with open lease (J.Andreina via vinayakumarb)
 
 
+    HDFS-7980. Incremental BlockReport will dramatically slow down namenode
+    startup.  (Walter Su via szetszwo)
+
+    HDFS-7446. HDFS inotify should have the ability to determine what txid it
+    has read up to (cmccabe)
+
+    HDFS-8846. Add a unit test for INotify functionality across a layout
+    version upgrade (Zhe Zhang via Colin P. McCabe)
+
+    HDFS-7470. SecondaryNameNode needs twice the memory when calling
+    reloadFromImageFile. (zhaoyunjiong via cnauroth)
+
+    HDFS-8863. The remaining space check in BlockPlacementPolicyDefault is
+    flawed. (Kihwal Lee via yliu)
+
+    HDFS-7552. Change FsVolumeList toString() to fix
+    TestDataNodeVolumeFailureToleration (Liang Xie via Colin P. McCabe)
+
 Release 2.6.0 - 2014-11-18
 Release 2.6.0 - 2014-11-18
 
 
   INCOMPATIBLE CHANGES
   INCOMPATIBLE CHANGES

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogTestUtil.java

@@ -33,7 +33,8 @@ public class FSEditLogTestUtil {
 
 
   public static long countTransactionsInStream(EditLogInputStream in) 
   public static long countTransactionsInStream(EditLogInputStream in) 
       throws IOException {
       throws IOException {
-    FSEditLogLoader.EditLogValidation validation = FSEditLogLoader.validateEditLog(in);
+    FSEditLogLoader.EditLogValidation validation =
+        FSEditLogLoader.scanEditLog(in, Long.MAX_VALUE);
     return (validation.getEndTxId() - in.getFirstTxId()) + 1;
     return (validation.getEndTxId() - in.getFirstTxId()) + 1;
   }
   }
 }
 }

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
@@ -49,7 +50,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -77,7 +77,8 @@ public class Hdfs extends AbstractFileSystem {
    * @throws IOException
    * @throws IOException
    */
    */
   Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
   Hdfs(final URI theUri, final Configuration conf) throws IOException, URISyntaxException {
-    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true, NameNode.DEFAULT_PORT);
+    super(theUri, HdfsConstants.HDFS_URI_SCHEME, true,
+        HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT);
 
 
     if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
     if (!theUri.getScheme().equalsIgnoreCase(HdfsConstants.HDFS_URI_SCHEME)) {
       throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
       throw new IllegalArgumentException("Passed URI's scheme is not for Hdfs");
@@ -92,7 +93,7 @@ public class Hdfs extends AbstractFileSystem {
 
 
   @Override
   @Override
   public int getUriDefaultPort() {
   public int getUriDefaultPort() {
-    return NameNode.DEFAULT_PORT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
   }
   }
 
 
   @Override
   @Override

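Both hunks in Hdfs.java above replace the server-side NameNode.DEFAULT_PORT constant with the client-side HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT. A minimal sketch of how downstream code might pick up the client-side constant; the host name is a placeholder and the constant is assumed to keep the usual 8020 default:

    import java.net.URI;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class DefaultNNPortExample {
      public static void main(String[] args) {
        // Resolve the default NameNode RPC port from the client-facing keys class
        // instead of the server-side NameNode class.
        int port = HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
        URI nnUri = URI.create("hdfs://namenode.example.com:" + port);
        System.out.println(nnUri);
      }
    }
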
+ 14 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/SWebHdfs.java

@@ -46,6 +46,19 @@ public class SWebHdfs extends DelegateToFileSystem {
    */
    */
   SWebHdfs(URI theUri, Configuration conf)
   SWebHdfs(URI theUri, Configuration conf)
       throws IOException, URISyntaxException {
       throws IOException, URISyntaxException {
-    super(theUri, new SWebHdfsFileSystem(), conf, SCHEME, false);
+    super(theUri, createSWebHdfsFileSystem(conf), conf, SCHEME, false);
+  }
+
+  /**
+   * Returns a new {@link SWebHdfsFileSystem}, with the given configuration.
+   *
+   * @param conf configuration
+   * @return new SWebHdfsFileSystem
+   */
+  private static SWebHdfsFileSystem createSWebHdfsFileSystem(
+      Configuration conf) {
+    SWebHdfsFileSystem fs = new SWebHdfsFileSystem();
+    fs.setConf(conf);
+    return fs;
   }
   }
 }
 }

+ 13 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/WebHdfs.java

@@ -46,6 +46,18 @@ public class WebHdfs extends DelegateToFileSystem {
    */
    */
   WebHdfs(URI theUri, Configuration conf)
   WebHdfs(URI theUri, Configuration conf)
       throws IOException, URISyntaxException {
       throws IOException, URISyntaxException {
-    super(theUri, new WebHdfsFileSystem(), conf, SCHEME, false);
+    super(theUri, createWebHdfsFileSystem(conf), conf, SCHEME, false);
+  }
+
+  /**
+   * Returns a new {@link WebHdfsFileSystem}, with the given configuration.
+   *
+   * @param conf configuration
+   * @return new WebHdfsFileSystem
+   */
+  private static WebHdfsFileSystem createWebHdfsFileSystem(Configuration conf) {
+    WebHdfsFileSystem fs = new WebHdfsFileSystem();
+    fs.setConf(conf);
+    return fs;
   }
   }
 }
 }

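The SWebHdfs and WebHdfs constructors above now build their delegate FileSystem through a small factory so that setConf(conf) runs before the delegate is handed to DelegateToFileSystem. A hedged usage sketch through the FileContext API, which is the code path that instantiates these AbstractFileSystem wrappers; the host and port are placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class WebHdfsFileContextExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // FileContext resolves the "webhdfs" scheme to the WebHdfs wrapper, which
        // now hands a configured WebHdfsFileSystem to DelegateToFileSystem.
        FileContext fc = FileContext.getFileContext(
            URI.create("webhdfs://namenode.example.com:50070"), conf);
        System.out.println(fc.util().exists(new Path("/")));
      }
    }
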
+ 4 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -99,7 +99,6 @@ import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -150,7 +149,6 @@ import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.io.DataOutputBuffer;
@@ -253,17 +251,17 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       = new HashMap<Long, DFSOutputStream>();
       = new HashMap<Long, DFSOutputStream>();
 
 
   /**
   /**
-   * Same as this(NameNode.getAddress(conf), conf);
+   * Same as this(NameNode.getNNAddress(conf), conf);
    * @see #DFSClient(InetSocketAddress, Configuration)
    * @see #DFSClient(InetSocketAddress, Configuration)
    * @deprecated Deprecated at 0.21
    * @deprecated Deprecated at 0.21
    */
    */
   @Deprecated
   @Deprecated
   public DFSClient(Configuration conf) throws IOException {
   public DFSClient(Configuration conf) throws IOException {
-    this(NameNode.getAddress(conf), conf);
+    this(DFSUtilClient.getNNAddress(conf), conf);
   }
   }
   
   
   public DFSClient(InetSocketAddress address, Configuration conf) throws IOException {
   public DFSClient(InetSocketAddress address, Configuration conf) throws IOException {
-    this(NameNode.getUri(address), conf);
+    this(DFSUtilClient.getNNUri(address), conf);
   }
   }
 
 
   /**
   /**
@@ -3060,7 +3058,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     try {
     try {
       sock = socketFactory.createSocket();
       sock = socketFactory.createSocket();
       NetUtils.connect(sock, addr, getRandomLocalInterfaceAddr(), socketTimeout);
       NetUtils.connect(sock, addr, getRandomLocalInterfaceAddr(), socketTimeout);
-      peer = TcpPeerServer.peerFromSocketAndKey(saslClient, sock, this,
+      peer = DFSUtilClient.peerFromSocketAndKey(saslClient, sock, this,
           blockToken, datanodeId);
           blockToken, datanodeId);
       peer.setReadTimeout(socketTimeout);
       peer.setReadTimeout(socketTimeout);
       peer.setWriteTimeout(socketTimeout);
       peer.setWriteTimeout(socketTimeout);

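The DFSClient changes above swap the server-side NameNode helpers for DFSUtilClient (getNNAddress, getNNUri, peerFromSocketAndKey). A short sketch of the address-resolution side for code that previously called NameNode.getAddress/getUri; it assumes these DFSUtilClient helpers are publicly accessible, and the URI is a placeholder:

    import java.net.InetSocketAddress;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtilClient;

    public class NNAddressExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://namenode.example.com:8020");
        // Replacement for NameNode.getAddress(conf):
        InetSocketAddress addr = DFSUtilClient.getNNAddress(conf);
        // Replacement for NameNode.getUri(addr):
        URI uri = DFSUtilClient.getNNUri(addr);
        System.out.println(addr + " -> " + uri);
      }
    }
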
+ 41 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -23,6 +23,7 @@ import java.util.concurrent.TimeUnit;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
@@ -173,6 +174,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
   public static final int     DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
   public static final String  DFS_NAMENODE_STRIPE_MIN_KEY = "dfs.namenode.stripe.min";
   public static final String  DFS_NAMENODE_STRIPE_MIN_KEY = "dfs.namenode.stripe.min";
   public static final int     DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1;
   public static final int     DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1;
+  public static final String  DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY =
+      "dfs.namenode.safemode.replication.min";
   public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.replication.pending.timeout-sec";
   public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.replication.pending.timeout-sec";
   public static final int     DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
   public static final int     DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";
@@ -364,6 +367,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       "dfs.namenode.metrics.logger.period.seconds";
       "dfs.namenode.metrics.logger.period.seconds";
   public static final int     DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT =
   public static final int     DFS_NAMENODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT =
       600;
       600;
+  public static final String DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY =
+      "dfs.datanode.metrics.logger.period.seconds";
+  public static final int DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT =
+      600;
 
 
   public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = "dfs.balancer.movedWinWidth";
   public static final String  DFS_BALANCER_MOVEDWINWIDTH_KEY = "dfs.balancer.movedWinWidth";
   public static final long    DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;
   public static final long    DFS_BALANCER_MOVEDWINWIDTH_DEFAULT = 5400*1000L;
@@ -612,14 +619,28 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // Security-related configs
   // Security-related configs
   public static final String DFS_ENCRYPT_DATA_TRANSFER_KEY = "dfs.encrypt.data.transfer";
   public static final String DFS_ENCRYPT_DATA_TRANSFER_KEY = "dfs.encrypt.data.transfer";
   public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
   public static final boolean DFS_ENCRYPT_DATA_TRANSFER_DEFAULT = false;
-  public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY = "dfs.encrypt.data.transfer.cipher.key.bitlength";
-  public static final int    DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT = 128;
-  public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY = "dfs.encrypt.data.transfer.cipher.suites";
+  @Deprecated
+  public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY =
+      HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_KEY;
+  @Deprecated
+  public static final int    DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT =
+      HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT;
+  @Deprecated
+  public static final String DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY =
+      HdfsClientConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_CIPHER_SUITES_KEY;
   public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
   public static final String DFS_DATA_ENCRYPTION_ALGORITHM_KEY = "dfs.encrypt.data.transfer.algorithm";
-  public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS = "dfs.trustedchannel.resolver.class";
-  public static final String DFS_DATA_TRANSFER_PROTECTION_KEY = "dfs.data.transfer.protection";
-  public static final String DFS_DATA_TRANSFER_PROTECTION_DEFAULT = "";
-  public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY = "dfs.data.transfer.saslproperties.resolver.class";
+  @Deprecated
+  public static final String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS =
+      HdfsClientConfigKeys.DFS_TRUSTEDCHANNEL_RESOLVER_CLASS;
+  @Deprecated
+  public static final String DFS_DATA_TRANSFER_PROTECTION_KEY =
+      HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+  @Deprecated
+  public static final String DFS_DATA_TRANSFER_PROTECTION_DEFAULT =
+      HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_DEFAULT;
+  @Deprecated
+  public static final String DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY =
+      HdfsClientConfigKeys.DFS_DATA_TRANSFER_SASL_PROPS_RESOLVER_CLASS_KEY;
   public static final int    DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT = 100;
   public static final int    DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES_DEFAULT = 100;
   public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
   public static final String DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES = "dfs.namenode.list.encryption.zones.num.responses";
   public static final String DFS_ENCRYPTION_KEY_PROVIDER_URI =
   public static final String DFS_ENCRYPTION_KEY_PROVIDER_URI =
@@ -766,9 +787,20 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT =
   public static final boolean DFS_DATANODE_BLOCK_PINNING_ENABLED_DEFAULT =
     false;
     false;
 
 
+  public static final String
+      DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_KEY =
+      "dfs.datanode.transfer.socket.send.buffer.size";
+  public static final int
+      DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_DEFAULT =
+      HdfsConstants.DEFAULT_DATA_SOCKET_SIZE;
+
+  public static final String
+      DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY =
+      "dfs.datanode.transfer.socket.recv.buffer.size";
+  public static final int
+      DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_DEFAULT =
+      HdfsConstants.DEFAULT_DATA_SOCKET_SIZE;
 
 
-  
-  
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry 
   @Deprecated
   @Deprecated
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY
   public static final String  DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

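The DFSConfigKeys hunks above add DataNode transfer-socket buffer-size keys, a DataNode metrics logger period, and a separate safe-mode minimum replication key, and turn the data-transfer security keys into deprecated aliases of their HdfsClientConfigKeys counterparts. A configuration sketch using the new constants; the values are illustrative, not recommendations:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class NewDfsConfigKeysExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // dfs.datanode.transfer.socket.{send,recv}.buffer.size
        conf.setInt(
            DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_SEND_BUFFER_SIZE_KEY,
            128 * 1024);
        conf.setInt(
            DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY,
            128 * 1024);
        // dfs.datanode.metrics.logger.period.seconds (default 600)
        conf.setInt(
            DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY, 300);
        // dfs.namenode.safemode.replication.min
        conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_REPLICATION_MIN_KEY, 1);
        System.out.println(conf.get(
            DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY));
      }
    }
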
+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -413,7 +413,7 @@ public class DFSUtil {
           NameNode.initializeGenericKeys(confForNn, nsId, nnId);
           NameNode.initializeGenericKeys(confForNn, nsId, nnId);
           String principal = SecurityUtil.getServerPrincipal(confForNn
           String principal = SecurityUtil.getServerPrincipal(confForNn
               .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
               .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
-              NameNode.getAddress(confForNn).getHostName());
+              DFSUtilClient.getNNAddress(confForNn).getHostName());
           principals.add(principal);
           principals.add(principal);
         }
         }
       } else {
       } else {
@@ -421,7 +421,7 @@ public class DFSUtil {
         NameNode.initializeGenericKeys(confForNn, nsId, null);
         NameNode.initializeGenericKeys(confForNn, nsId, null);
         String principal = SecurityUtil.getServerPrincipal(confForNn
         String principal = SecurityUtil.getServerPrincipal(confForNn
             .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
             .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
-            NameNode.getAddress(confForNn).getHostName());
+            DFSUtilClient.getNNAddress(confForNn).getHostName());
         principals.add(principal);
         principals.add(principal);
       }
       }
     }
     }
@@ -497,7 +497,8 @@ public class DFSUtil {
     // Use default address as fall back
     // Use default address as fall back
     String defaultAddress;
     String defaultAddress;
     try {
     try {
-      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
+      defaultAddress = NetUtils.getHostPortString(
+          DFSUtilClient.getNNAddress(conf));
     } catch (IllegalArgumentException e) {
     } catch (IllegalArgumentException e) {
       defaultAddress = null;
       defaultAddress = null;
     }
     }
@@ -533,7 +534,8 @@ public class DFSUtil {
     // Use default address as fall back
     // Use default address as fall back
     String defaultAddress;
     String defaultAddress;
     try {
     try {
-      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
+      defaultAddress = NetUtils.getHostPortString(
+          DFSUtilClient.getNNAddress(conf));
     } catch (IllegalArgumentException e) {
     } catch (IllegalArgumentException e) {
       defaultAddress = null;
       defaultAddress = null;
     }
     }

+ 1 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java

@@ -46,7 +46,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
-import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -69,7 +68,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
@@ -155,9 +153,7 @@ class DataStreamer extends Daemon {
    * @return if this file is lazy persist
    * @return if this file is lazy persist
    */
    */
   static boolean isLazyPersist(HdfsFileStatus stat) {
   static boolean isLazyPersist(HdfsFileStatus stat) {
-    final BlockStoragePolicy p = blockStoragePolicySuite.getPolicy(
-        HdfsConstants.MEMORY_STORAGE_POLICY_NAME);
-    return p != null && stat.getStoragePolicy() == p.getId();
+    return stat.getStoragePolicy() == HdfsConstants.MEMORY_STORAGE_POLICY_ID;
   }
   }
 
 
   /**
   /**
@@ -392,8 +388,6 @@ class DataStreamer extends Daemon {
   private final LinkedList<DFSPacket> ackQueue = new LinkedList<>();
   private final LinkedList<DFSPacket> ackQueue = new LinkedList<>();
   private final AtomicReference<CachingStrategy> cachingStrategy;
   private final AtomicReference<CachingStrategy> cachingStrategy;
   private final ByteArrayManager byteArrayManager;
   private final ByteArrayManager byteArrayManager;
-  private static final BlockStoragePolicySuite blockStoragePolicySuite =
-      BlockStoragePolicySuite.createDefaultSuite();
   //persist blocks on namenode
   //persist blocks on namenode
   private final AtomicBoolean persistBlocks = new AtomicBoolean(false);
   private final AtomicBoolean persistBlocks = new AtomicBoolean(false);
   private boolean failPacket = false;
   private boolean failPacket = false;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -63,6 +63,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -84,7 +85,6 @@ import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetUtils;
@@ -1498,7 +1498,7 @@ public class DistributedFileSystem extends FileSystem {
 
 
   @Override
   @Override
   protected int getDefaultPort() {
   protected int getDefaultPort() {
-    return NameNode.DEFAULT_PORT;
+    return HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT;
   }
   }
 
 
   @Override
   @Override

+ 8 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java

@@ -165,8 +165,8 @@ public class NameNodeProxies {
   
   
     if (failoverProxyProvider == null) {
     if (failoverProxyProvider == null) {
       // Non-HA case
       // Non-HA case
-      return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
-          UserGroupInformation.getCurrentUser(), true,
+      return createNonHAProxy(conf, DFSUtilClient.getNNAddress(nameNodeUri),
+          xface, UserGroupInformation.getCurrentUser(), true,
           fallbackToSimpleAuth);
           fallbackToSimpleAuth);
     } else {
     } else {
       // HA case
       // HA case
@@ -183,10 +183,10 @@ public class NameNodeProxies {
                                                                 HdfsConstants.HDFS_URI_SCHEME);
                                                                 HdfsConstants.HDFS_URI_SCHEME);
       } else {
       } else {
         dtService = SecurityUtil.buildTokenService(
         dtService = SecurityUtil.buildTokenService(
-            NameNode.getAddress(nameNodeUri));
+            DFSUtilClient.getNNAddress(nameNodeUri));
       }
       }
       return new ProxyAndInfo<T>(proxy, dtService,
       return new ProxyAndInfo<T>(proxy, dtService,
-          NameNode.getAddress(nameNodeUri));
+          DFSUtilClient.getNNAddress(nameNodeUri));
     }
     }
   }
   }
   
   
@@ -249,10 +249,10 @@ public class NameNodeProxies {
                                                                 HdfsConstants.HDFS_URI_SCHEME);
                                                                 HdfsConstants.HDFS_URI_SCHEME);
       } else {
       } else {
         dtService = SecurityUtil.buildTokenService(
         dtService = SecurityUtil.buildTokenService(
-            NameNode.getAddress(nameNodeUri));
+            DFSUtilClient.getNNAddress(nameNodeUri));
       }
       }
       return new ProxyAndInfo<T>(proxy, dtService,
       return new ProxyAndInfo<T>(proxy, dtService,
-          NameNode.getAddress(nameNodeUri));
+          DFSUtilClient.getNNAddress(nameNodeUri));
     } else {
     } else {
       LOG.warn("Currently creating proxy using " +
       LOG.warn("Currently creating proxy using " +
       		"LossyRetryInvocationHandler requires NN HA setup");
       		"LossyRetryInvocationHandler requires NN HA setup");
@@ -509,7 +509,8 @@ public class NameNodeProxies {
     // Check the port in the URI, if it is logical.
     // Check the port in the URI, if it is logical.
     if (checkPort && providerNN.useLogicalURI()) {
     if (checkPort && providerNN.useLogicalURI()) {
       int port = nameNodeUri.getPort();
       int port = nameNodeUri.getPort();
-      if (port > 0 && port != NameNode.DEFAULT_PORT) {
+      if (port > 0 &&
+          port != HdfsClientConfigKeys.DFS_NAMENODE_RPC_PORT_DEFAULT) {
         // Throwing here without any cleanup is fine since we have not
         // Throwing here without any cleanup is fine since we have not
         // actually created the underlying proxies yet.
         // actually created the underlying proxies yet.
         throw new IOException("Port " + port + " specified in URI "
         throw new IOException("Port " + port + " specified in URI "

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeerServer.java

@@ -49,6 +49,11 @@ public class DomainPeerServer implements PeerServer {
     sock.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE, size);
     sock.setAttribute(DomainSocket.RECEIVE_BUFFER_SIZE, size);
   }
   }
 
 
+  @Override
+  public int getReceiveBufferSize() throws IOException {
+    return sock.getAttribute(DomainSocket.RECEIVE_BUFFER_SIZE);
+  }
+
   @Override
   @Override
   public Peer accept() throws IOException, SocketTimeoutException {
   public Peer accept() throws IOException, SocketTimeoutException {
     DomainSocket connSock = sock.accept();
     DomainSocket connSock = sock.accept();

+ 8 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/PeerServer.java

@@ -32,7 +32,14 @@ public interface PeerServer extends Closeable {
   public void setReceiveBufferSize(int size) throws IOException;
   public void setReceiveBufferSize(int size) throws IOException;
 
 
   /**
   /**
-   * Listens for a connection to be made to this server and accepts 
+   * Get the receive buffer size of the PeerServer.
+   *
+   * @return     The receive buffer size.
+   */
+  int getReceiveBufferSize() throws IOException;
+
+  /**
+   * Listens for a connection to be made to this server and accepts
    * it. The method blocks until a connection is made.
    * it. The method blocks until a connection is made.
    *
    *
    * @exception IOException  if an I/O error occurs when waiting for a
    * @exception IOException  if an I/O error occurs when waiting for a

+ 7 - 63
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/TcpPeerServer.java

@@ -20,22 +20,15 @@ package org.apache.hadoop.hdfs.net;
 import java.io.IOException;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.ServerSocket;
-import java.net.Socket;
 import java.net.SocketTimeoutException;
 import java.net.SocketTimeoutException;
 import java.nio.channels.ServerSocketChannel;
 import java.nio.channels.ServerSocketChannel;
-import java.nio.channels.SocketChannel;
 
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataEncryptionKeyFactory;
-import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.Server;
-import org.apache.hadoop.security.token.Token;
 
 
 @InterfaceAudience.Private
 @InterfaceAudience.Private
 public class TcpPeerServer implements PeerServer {
 public class TcpPeerServer implements PeerServer {
@@ -43,60 +36,6 @@ public class TcpPeerServer implements PeerServer {
 
 
   private final ServerSocket serverSocket;
   private final ServerSocket serverSocket;
 
 
-  public static Peer peerFromSocket(Socket socket)
-      throws IOException {
-    Peer peer = null;
-    boolean success = false;
-    try {
-      // TCP_NODELAY is crucial here because of bad interactions between
-      // Nagle's Algorithm and Delayed ACKs. With connection keepalive
-      // between the client and DN, the conversation looks like:
-      //   1. Client -> DN: Read block X
-      //   2. DN -> Client: data for block X
-      //   3. Client -> DN: Status OK (successful read)
-      //   4. Client -> DN: Read block Y
-      // The fact that step #3 and #4 are both in the client->DN direction
-      // triggers Nagling. If the DN is using delayed ACKs, this results
-      // in a delay of 40ms or more.
-      //
-      // TCP_NODELAY disables nagling and thus avoids this performance
-      // disaster.
-      socket.setTcpNoDelay(true);
-      SocketChannel channel = socket.getChannel();
-      if (channel == null) {
-        peer = new BasicInetPeer(socket);
-      } else {
-        peer = new NioInetPeer(socket);
-      }
-      success = true;
-      return peer;
-    } finally {
-      if (!success) {
-        if (peer != null) peer.close();
-        socket.close();
-      }
-    }
-  }
-
-  public static Peer peerFromSocketAndKey(
-        SaslDataTransferClient saslClient, Socket s,
-        DataEncryptionKeyFactory keyFactory,
-        Token<BlockTokenIdentifier> blockToken, DatanodeID datanodeId)
-        throws IOException {
-    Peer peer = null;
-    boolean success = false;
-    try {
-      peer = peerFromSocket(s);
-      peer = saslClient.peerSend(peer, keyFactory, blockToken, datanodeId);
-      success = true;
-      return peer;
-    } finally {
-      if (!success) {
-        IOUtils.cleanup(null, peer);
-      }
-    }
-  }
-
   /**
   /**
    * Create a non-secure TcpPeerServer.
    * Create a non-secure TcpPeerServer.
    *
    *
@@ -134,9 +73,14 @@ public class TcpPeerServer implements PeerServer {
     this.serverSocket.setReceiveBufferSize(size);
     this.serverSocket.setReceiveBufferSize(size);
   }
   }
 
 
+  @Override
+  public int getReceiveBufferSize() throws IOException {
+    return this.serverSocket.getReceiveBufferSize();
+  }
+
   @Override
   @Override
   public Peer accept() throws IOException, SocketTimeoutException {
   public Peer accept() throws IOException, SocketTimeoutException {
-    Peer peer = peerFromSocket(serverSocket.accept());
+    Peer peer = DFSUtilClient.peerFromSocket(serverSocket.accept());
     return peer;
     return peer;
   }
   }
 
 

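peerFromSocket and peerFromSocketAndKey move out of TcpPeerServer, and accept() above now delegates to DFSUtilClient. A migration sketch for code that wrapped a raw Socket itself; it assumes the relocated helper keeps the signature shown in this diff (including the TCP_NODELAY handling described in the removed comment), and the DataNode address and timeout are placeholders:

    import java.net.InetSocketAddress;
    import java.net.Socket;
    import org.apache.hadoop.hdfs.DFSUtilClient;
    import org.apache.hadoop.hdfs.net.Peer;

    public class PeerFromSocketExample {
      public static void main(String[] args) throws Exception {
        Socket sock = new Socket();
        sock.connect(new InetSocketAddress("datanode.example.com", 50010), 60000);
        // Previously TcpPeerServer.peerFromSocket(sock); the helper now lives in
        // DFSUtilClient.
        Peer peer = DFSUtilClient.peerFromSocket(sock);
        try {
          System.out.println(peer.getRemoteAddressString());
        } finally {
          peer.close();
        }
      }
    }
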
+ 16 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java

@@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpTransferBlockP
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReleaseShortCircuitAccessRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmRequestProto;
-import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
@@ -115,7 +114,7 @@ public abstract class Receiver implements DataTransferProtocol {
         proto.getClass().getSimpleName());
         proto.getClass().getSimpleName());
     try {
     try {
       readBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
       readBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
-        PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
+        PBHelperClient.convert(proto.getHeader().getBaseHeader().getToken()),
         proto.getHeader().getClientName(),
         proto.getHeader().getClientName(),
         proto.getOffset(),
         proto.getOffset(),
         proto.getLen(),
         proto.getLen(),
@@ -131,17 +130,17 @@ public abstract class Receiver implements DataTransferProtocol {
   /** Receive OP_WRITE_BLOCK */
   /** Receive OP_WRITE_BLOCK */
   private void opWriteBlock(DataInputStream in) throws IOException {
   private void opWriteBlock(DataInputStream in) throws IOException {
     final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
     final OpWriteBlockProto proto = OpWriteBlockProto.parseFrom(vintPrefixed(in));
-    final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
+    final DatanodeInfo[] targets = PBHelperClient.convert(proto.getTargetsList());
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
         proto.getClass().getSimpleName());
         proto.getClass().getSimpleName());
     try {
     try {
       writeBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
       writeBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
           PBHelperClient.convertStorageType(proto.getStorageType()),
           PBHelperClient.convertStorageType(proto.getStorageType()),
-          PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
+          PBHelperClient.convert(proto.getHeader().getBaseHeader().getToken()),
           proto.getHeader().getClientName(),
           proto.getHeader().getClientName(),
           targets,
           targets,
-          PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
-          PBHelper.convert(proto.getSource()),
+          PBHelperClient.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length),
+          PBHelperClient.convert(proto.getSource()),
           fromProto(proto.getStage()),
           fromProto(proto.getStage()),
           proto.getPipelineSize(),
           proto.getPipelineSize(),
           proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
           proto.getMinBytesRcvd(), proto.getMaxBytesRcvd(),
@@ -152,7 +151,7 @@ public abstract class Receiver implements DataTransferProtocol {
             CachingStrategy.newDefaultStrategy()),
             CachingStrategy.newDefaultStrategy()),
           (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false),
           (proto.hasAllowLazyPersist() ? proto.getAllowLazyPersist() : false),
           (proto.hasPinning() ? proto.getPinning(): false),
           (proto.hasPinning() ? proto.getPinning(): false),
-          (PBHelper.convertBooleanList(proto.getTargetPinningsList())));
+          (PBHelperClient.convertBooleanList(proto.getTargetPinningsList())));
     } finally {
     } finally {
      if (traceScope != null) traceScope.close();
      if (traceScope != null) traceScope.close();
     }
     }
@@ -162,15 +161,15 @@ public abstract class Receiver implements DataTransferProtocol {
   private void opTransferBlock(DataInputStream in) throws IOException {
   private void opTransferBlock(DataInputStream in) throws IOException {
     final OpTransferBlockProto proto =
     final OpTransferBlockProto proto =
       OpTransferBlockProto.parseFrom(vintPrefixed(in));
       OpTransferBlockProto.parseFrom(vintPrefixed(in));
-    final DatanodeInfo[] targets = PBHelper.convert(proto.getTargetsList());
+    final DatanodeInfo[] targets = PBHelperClient.convert(proto.getTargetsList());
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
         proto.getClass().getSimpleName());
         proto.getClass().getSimpleName());
     try {
     try {
       transferBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
       transferBlock(PBHelperClient.convert(proto.getHeader().getBaseHeader().getBlock()),
-          PBHelper.convert(proto.getHeader().getBaseHeader().getToken()),
+          PBHelperClient.convert(proto.getHeader().getBaseHeader().getToken()),
           proto.getHeader().getClientName(),
           proto.getHeader().getClientName(),
           targets,
           targets,
-          PBHelper.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
+          PBHelperClient.convertStorageTypes(proto.getTargetStorageTypesList(), targets.length));
     } finally {
     } finally {
       if (traceScope != null) traceScope.close();
       if (traceScope != null) traceScope.close();
     }
     }
@@ -181,12 +180,12 @@ public abstract class Receiver implements DataTransferProtocol {
     final OpRequestShortCircuitAccessProto proto =
     final OpRequestShortCircuitAccessProto proto =
       OpRequestShortCircuitAccessProto.parseFrom(vintPrefixed(in));
       OpRequestShortCircuitAccessProto.parseFrom(vintPrefixed(in));
     SlotId slotId = (proto.hasSlotId()) ? 
     SlotId slotId = (proto.hasSlotId()) ? 
-        PBHelper.convert(proto.getSlotId()) : null;
+        PBHelperClient.convert(proto.getSlotId()) : null;
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
     TraceScope traceScope = continueTraceSpan(proto.getHeader(),
         proto.getClass().getSimpleName());
         proto.getClass().getSimpleName());
     try {
     try {
       requestShortCircuitFds(PBHelperClient.convert(proto.getHeader().getBlock()),
       requestShortCircuitFds(PBHelperClient.convert(proto.getHeader().getBlock()),
-          PBHelper.convert(proto.getHeader().getToken()),
+          PBHelperClient.convert(proto.getHeader().getToken()),
           slotId, proto.getMaxVersion(),
           slotId, proto.getMaxVersion(),
           proto.getSupportsReceiptVerification());
           proto.getSupportsReceiptVerification());
     } finally {
     } finally {
@@ -202,7 +201,7 @@ public abstract class Receiver implements DataTransferProtocol {
     TraceScope traceScope = continueTraceSpan(proto.getTraceInfo(),
     TraceScope traceScope = continueTraceSpan(proto.getTraceInfo(),
         proto.getClass().getSimpleName());
         proto.getClass().getSimpleName());
     try {
     try {
-      releaseShortCircuitFds(PBHelper.convert(proto.getSlotId()));
+      releaseShortCircuitFds(PBHelperClient.convert(proto.getSlotId()));
     } finally {
     } finally {
       if (traceScope != null) traceScope.close();
       if (traceScope != null) traceScope.close();
     }
     }
@@ -229,9 +228,9 @@ public abstract class Receiver implements DataTransferProtocol {
     try {
     try {
       replaceBlock(PBHelperClient.convert(proto.getHeader().getBlock()),
       replaceBlock(PBHelperClient.convert(proto.getHeader().getBlock()),
           PBHelperClient.convertStorageType(proto.getStorageType()),
           PBHelperClient.convertStorageType(proto.getStorageType()),
-          PBHelper.convert(proto.getHeader().getToken()),
+          PBHelperClient.convert(proto.getHeader().getToken()),
           proto.getDelHint(),
           proto.getDelHint(),
-          PBHelper.convert(proto.getSource()));
+          PBHelperClient.convert(proto.getSource()));
     } finally {
     } finally {
       if (traceScope != null) traceScope.close();
       if (traceScope != null) traceScope.close();
     }
     }
@@ -244,7 +243,7 @@ public abstract class Receiver implements DataTransferProtocol {
         proto.getClass().getSimpleName());
         proto.getClass().getSimpleName());
     try {
     try {
       copyBlock(PBHelperClient.convert(proto.getHeader().getBlock()),
       copyBlock(PBHelperClient.convert(proto.getHeader().getBlock()),
-          PBHelper.convert(proto.getHeader().getToken()));
+          PBHelperClient.convert(proto.getHeader().getToken()));
     } finally {
     } finally {
       if (traceScope != null) traceScope.close();
       if (traceScope != null) traceScope.close();
     }
     }
@@ -257,7 +256,7 @@ public abstract class Receiver implements DataTransferProtocol {
         proto.getClass().getSimpleName());
         proto.getClass().getSimpleName());
     try {
     try {
     blockChecksum(PBHelperClient.convert(proto.getHeader().getBlock()),
     blockChecksum(PBHelperClient.convert(proto.getHeader().getBlock()),
-        PBHelper.convert(proto.getHeader().getToken()));
+        PBHelperClient.convert(proto.getHeader().getToken()));
     } finally {
     } finally {
       if (traceScope != null) traceScope.close();
       if (traceScope != null) traceScope.close();
     }
     }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java

@@ -17,7 +17,7 @@
  */
  */
 package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 package org.apache.hadoop.hdfs.protocol.datatransfer.sasl;
 
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
 import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
 import static org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil.*;
 
 
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayInputStream;

+ 20 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java

@@ -29,6 +29,8 @@ import org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DeleteBlockPoolResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBalancerBandwidthResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetBlockLocalPathInfoResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.GetDatanodeInfoRequestProto;
@@ -121,7 +123,9 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
       throws ServiceException {
     BlockLocalPathInfo resp;
     try {
-      resp = impl.getBlockLocalPathInfo(PBHelperClient.convert(request.getBlock()), PBHelper.convert(request.getToken()));
+      resp = impl.getBlockLocalPathInfo(
+                 PBHelperClient.convert(request.getBlock()),
+                 PBHelperClient.convert(request.getToken()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -148,7 +152,7 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
     GetDatanodeInfoResponseProto res;
     try {
       res = GetDatanodeInfoResponseProto.newBuilder()
-          .setLocalInfo(PBHelper.convert(impl.getDatanodeInfo())).build();
+          .setLocalInfo(PBHelperClient.convert(impl.getDatanodeInfo())).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -231,4 +235,18 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements
     }
     return TRIGGER_BLOCK_REPORT_RESP;
   }
+
+  @Override
+  public GetBalancerBandwidthResponseProto getBalancerBandwidth(
+      RpcController controller, GetBalancerBandwidthRequestProto request)
+      throws ServiceException {
+    long bandwidth;
+    try {
+      bandwidth = impl.getBalancerBandwidth();
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+    return GetBalancerBandwidthResponseProto.newBuilder()
+        .setBandwidth(bandwidth).build();
+  }
 }

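The getBalancerBandwidth addition above exposes a new ClientDatanodeProtocol RPC that returns the DataNode's current balancer bandwidth as a long (bytes per second). For orientation only, a minimal sketch of what the matching client-side translator call might look like, assuming the same rpcProxy/ProtobufHelper pattern the other translator methods in this change use; the exact method placement and the request construction shown here are assumptions, not part of this diff:

  // Hypothetical client-side counterpart (sketch only, not part of this commit):
  // build an empty request, issue the RPC, and unwrap the bandwidth value.
  @Override
  public long getBalancerBandwidth() throws IOException {
    GetBalancerBandwidthRequestProto req =
        GetBalancerBandwidthRequestProto.newBuilder().build();
    try {
      return rpcProxy.getBalancerBandwidth(null, req).getBandwidth();
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }
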
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolPB.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.protocolPB;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
@@ -31,7 +31,7 @@ import org.apache.hadoop.security.token.TokenInfo;
 @InterfaceAudience.Private
 @InterfaceStability.Stable
 @KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
+    serverPrincipal = HdfsClientConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY)
 @TokenInfo(DelegationTokenSelector.class)
 @ProtocolInfo(protocolName = HdfsConstants.CLIENT_NAMENODE_PROTOCOL_NAME,
     protocolVersion = 1)

+ 68 - 67
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -374,7 +374,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       Builder builder = GetBlockLocationsResponseProto
           .newBuilder();
       if (b != null) {
-        builder.setLocations(PBHelper.convert(b)).build();
+        builder.setLocations(PBHelperClient.convert(b)).build();
       }
       return builder.build();
     } catch (IOException e) {
@@ -389,7 +389,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     try {
       FsServerDefaults result = server.getServerDefaults();
       return GetServerDefaultsResponseProto.newBuilder()
-          .setServerDefaults(PBHelper.convert(result))
+          .setServerDefaults(PBHelperClient.convert(result))
           .build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -402,14 +402,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       CreateRequestProto req) throws ServiceException {
     try {
       HdfsFileStatus result = server.create(req.getSrc(),
-          PBHelper.convert(req.getMasked()), req.getClientName(),
-          PBHelper.convertCreateFlag(req.getCreateFlag()), req.getCreateParent(),
+          PBHelperClient.convert(req.getMasked()), req.getClientName(),
+          PBHelperClient.convertCreateFlag(req.getCreateFlag()), req.getCreateParent(),
          (short) req.getReplication(), req.getBlockSize(),
-          PBHelper.convertCryptoProtocolVersions(
+          PBHelperClient.convertCryptoProtocolVersions(
              req.getCryptoProtocolVersionList()));
 
       if (result != null) {
-        return CreateResponseProto.newBuilder().setFs(PBHelper.convert(result))
+        return CreateResponseProto.newBuilder().setFs(PBHelperClient.convert(result))
             .build();
       }
       return VOID_CREATE_RESPONSE;
@@ -423,16 +423,17 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       AppendRequestProto req) throws ServiceException {
     try {
       EnumSetWritable<CreateFlag> flags = req.hasFlag() ?
-          PBHelper.convertCreateFlag(req.getFlag()) :
+          PBHelperClient.convertCreateFlag(req.getFlag()) :
           new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND));
       LastBlockWithStatus result = server.append(req.getSrc(),
           req.getClientName(), flags);
       AppendResponseProto.Builder builder = AppendResponseProto.newBuilder();
       if (result.getLastBlock() != null) {
-        builder.setBlock(PBHelper.convertLocatedBlock(result.getLastBlock()));
+        builder.setBlock(PBHelperClient.convertLocatedBlock(
+            result.getLastBlock()));
       }
       if (result.getFileStatus() != null) {
-        builder.setStat(PBHelper.convert(result.getFileStatus()));
+        builder.setStat(PBHelperClient.convert(result.getFileStatus()));
       }
       return builder.build();
     } catch (IOException e) {
@@ -457,7 +458,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public SetPermissionResponseProto setPermission(RpcController controller,
       SetPermissionRequestProto req) throws ServiceException {
     try {
-      server.setPermission(req.getSrc(), PBHelper.convert(req.getPermission()));
+      server.setPermission(req.getSrc(), PBHelperClient.convert(req.getPermission()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -500,12 +501,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
           req.getSrc(),
           req.getClientName(),
           req.hasPrevious() ? PBHelperClient.convert(req.getPrevious()) : null,
-          (excl == null || excl.size() == 0) ? null : PBHelper.convert(excl
+          (excl == null || excl.size() == 0) ? null : PBHelperClient.convert(excl
               .toArray(new DatanodeInfoProto[excl.size()])), req.getFileId(),
           (favor == null || favor.size() == 0) ? null : favor
               .toArray(new String[favor.size()]));
       return AddBlockResponseProto.newBuilder()
-          .setBlock(PBHelper.convertLocatedBlock(result)).build();
+          .setBlock(PBHelperClient.convertLocatedBlock(result)).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -521,15 +522,15 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       List<DatanodeInfoProto> excludesList = req.getExcludesList();
       LocatedBlock result = server.getAdditionalDatanode(req.getSrc(),
           req.getFileId(), PBHelperClient.convert(req.getBlk()),
-          PBHelper.convert(existingList.toArray(
+          PBHelperClient.convert(existingList.toArray(
               new DatanodeInfoProto[existingList.size()])),
           existingStorageIDsList.toArray(
               new String[existingStorageIDsList.size()]),
-          PBHelper.convert(excludesList.toArray(
-              new DatanodeInfoProto[excludesList.size()])), 
+          PBHelperClient.convert(excludesList.toArray(
+              new DatanodeInfoProto[excludesList.size()])),
           req.getNumAdditionalNodes(), req.getClientName());
       return GetAdditionalDatanodeResponseProto.newBuilder().setBlock(
-          PBHelper.convertLocatedBlock(result))
+      PBHelperClient.convertLocatedBlock(result))
           .build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -555,7 +556,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       ReportBadBlocksRequestProto req) throws ServiceException {
     try {
       List<LocatedBlockProto> bl = req.getBlocksList();
-      server.reportBadBlocks(PBHelper.convertLocatedBlocks(
+      server.reportBadBlocks(PBHelperClient.convertLocatedBlocks(
          bl.toArray(new LocatedBlockProto[bl.size()])));
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -627,7 +628,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       MkdirsRequestProto req) throws ServiceException {
     try {
       boolean result = server.mkdirs(req.getSrc(),
-          PBHelper.convert(req.getMasked()), req.getCreateParent());
+          PBHelperClient.convert(req.getMasked()), req.getCreateParent());
       return MkdirsResponseProto.newBuilder().setResult(result).build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -643,7 +644,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
           req.getNeedLocation());
       if (result !=null) {
         return GetListingResponseProto.newBuilder().setDirList(
-          PBHelper.convert(result)).build();
+          PBHelperClient.convert(result)).build();
       } else {
         return VOID_GETLISTING_RESPONSE;
       }
@@ -691,7 +692,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public GetFsStatsResponseProto getFsStats(RpcController controller,
       GetFsStatusRequestProto req) throws ServiceException {
     try {
-      return PBHelper.convert(server.getStats());
+      return PBHelperClient.convert(server.getStats());
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -703,7 +704,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       throws ServiceException {
     try {
       List<? extends DatanodeInfoProto> result = PBHelperClient.convert(server
-          .getDatanodeReport(PBHelper.convert(req.getType())));
+          .getDatanodeReport(PBHelperClient.convert(req.getType())));
       return GetDatanodeReportResponseProto.newBuilder()
           .addAllDi(result).build();
     } catch (IOException e) {
@@ -716,8 +717,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, GetDatanodeStorageReportRequestProto req)
       throws ServiceException {
     try {
-      List<DatanodeStorageReportProto> reports = PBHelper.convertDatanodeStorageReports(
-          server.getDatanodeStorageReport(PBHelper.convert(req.getType())));
+      List<DatanodeStorageReportProto> reports = PBHelperClient.convertDatanodeStorageReports(
+          server.getDatanodeStorageReport(PBHelperClient.convert(req.getType())));
       return GetDatanodeStorageReportResponseProto.newBuilder()
           .addAllDatanodeStorageReports(reports)
           .build();
@@ -743,7 +744,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public SetSafeModeResponseProto setSafeMode(RpcController controller,
       SetSafeModeRequestProto req) throws ServiceException {
     try {
-      boolean result = server.setSafeMode(PBHelper.convert(req.getAction()),
+      boolean result = server.setSafeMode(PBHelperClient.convert(req.getAction()),
           req.getChecked());
       return SetSafeModeResponseProto.newBuilder().setResult(result).build();
     } catch (IOException e) {
@@ -806,10 +807,10 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RollingUpgradeRequestProto req) throws ServiceException {
     try {
       final RollingUpgradeInfo info = server.rollingUpgrade(
-          PBHelper.convert(req.getAction()));
+          PBHelperClient.convert(req.getAction()));
       final RollingUpgradeResponseProto.Builder b = RollingUpgradeResponseProto.newBuilder();
       if (info != null) {
-        b.setRollingUpgradeInfo(PBHelper.convert(info));
+        b.setRollingUpgradeInfo(PBHelperClient.convert(info));
       }
       return b.build();
     } catch (IOException e) {
@@ -825,7 +826,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       CorruptFileBlocks result = server.listCorruptFileBlocks(
           req.getPath(), req.hasCookie() ? req.getCookie(): null);
       return ListCorruptFileBlocksResponseProto.newBuilder()
-          .setCorrupt(PBHelper.convert(result))
+          .setCorrupt(PBHelperClient.convert(result))
           .build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -852,7 +853,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
  
       if (result != null) {
         return GetFileInfoResponseProto.newBuilder().setFs(
-            PBHelper.convert(result)).build();
+            PBHelperClient.convert(result)).build();
       }
       return VOID_GETFILEINFO_RESPONSE;
     } catch (IOException e) {
@@ -867,7 +868,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       HdfsFileStatus result = server.getFileLinkInfo(req.getSrc());
       if (result != null) {
         return GetFileLinkInfoResponseProto.newBuilder().setFs(
-            PBHelper.convert(result)).build();
+            PBHelperClient.convert(result)).build();
       } else {
         return VOID_GETFILELINKINFO_RESPONSE;
       }
@@ -884,7 +885,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     try {
       ContentSummary result = server.getContentSummary(req.getPath());
       return GetContentSummaryResponseProto.newBuilder()
-          .setSummary(PBHelper.convert(result)).build();
+          .setSummary(PBHelperClient.convert(result)).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -932,7 +933,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       CreateSymlinkRequestProto req) throws ServiceException {
     try {
       server.createSymlink(req.getTarget(), req.getLink(),
-          PBHelper.convert(req.getDirPerm()), req.getCreateParent());
+          PBHelperClient.convert(req.getDirPerm()), req.getCreateParent());
       return VOID_CREATESYMLINK_RESPONSE;
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -960,7 +961,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, UpdateBlockForPipelineRequestProto req)
       throws ServiceException {
     try {
-      LocatedBlockProto result = PBHelper.convertLocatedBlock(
+      LocatedBlockProto result = PBHelperClient.convertLocatedBlock(
           server.updateBlockForPipeline(PBHelperClient.convert(req.getBlock()),
               req.getClientName()));
       return UpdateBlockForPipelineResponseProto.newBuilder().setBlock(result)
@@ -979,7 +980,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       server.updatePipeline(req.getClientName(),
           PBHelperClient.convert(req.getOldBlock()),
           PBHelperClient.convert(req.getNewBlock()),
-          PBHelper.convert(newNodes.toArray(new DatanodeIDProto[newNodes.size()])),
+          PBHelperClient.convert(newNodes.toArray(new DatanodeIDProto[newNodes.size()])),
           newStorageIDs.toArray(new String[newStorageIDs.size()]));
       return VOID_UPDATEPIPELINE_RESPONSE;
     } catch (IOException e) {
@@ -1010,7 +1011,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, RenewDelegationTokenRequestProto req)
       throws ServiceException {
     try {
-      long result = server.renewDelegationToken(PBHelper
+      long result = server.renewDelegationToken(PBHelperClient
          .convertDelegationToken(req.getToken()));
       return RenewDelegationTokenResponseProto.newBuilder()
          .setNewExpiryTime(result).build();
@@ -1024,7 +1025,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, CancelDelegationTokenRequestProto req)
       throws ServiceException {
     try {
-      server.cancelDelegationToken(PBHelper.convertDelegationToken(req
+      server.cancelDelegationToken(PBHelperClient.convertDelegationToken(req
          .getToken()));
       return VOID_CANCELDELEGATIONTOKEN_RESPONSE;
     } catch (IOException e) {
@@ -1053,7 +1054,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
           GetDataEncryptionKeyResponseProto.newBuilder();
       DataEncryptionKey encryptionKey = server.getDataEncryptionKey();
       if (encryptionKey != null) {
-        builder.setDataEncryptionKey(PBHelper.convert(encryptionKey));
+        builder.setDataEncryptionKey(PBHelperClient.convert(encryptionKey));
       }
       return builder.build();
     } catch (IOException e) {
@@ -1132,7 +1133,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
           .getSnapshottableDirListing();
       if (result != null) {
         return GetSnapshottableDirListingResponseProto.newBuilder().
-            setSnapshottableDirList(PBHelper.convert(result)).build();
+            setSnapshottableDirList(PBHelperClient.convert(result)).build();
       } else {
         return NULL_GET_SNAPSHOTTABLE_DIR_LISTING_RESPONSE;
       }
@@ -1150,7 +1151,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
           request.getSnapshotRoot(), request.getFromSnapshot(),
           request.getToSnapshot());
       return GetSnapshotDiffReportResponseProto.newBuilder()
-          .setDiffReport(PBHelper.convert(report)).build();
+          .setDiffReport(PBHelperClient.convert(report)).build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -1174,8 +1175,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       throws ServiceException {
     try {
       long id = server.addCacheDirective(
-          PBHelper.convert(request.getInfo()),
-          PBHelper.convertCacheFlags(request.getCacheFlags()));
+          PBHelperClient.convert(request.getInfo()),
+          PBHelperClient.convertCacheFlags(request.getCacheFlags()));
       return AddCacheDirectiveResponseProto.newBuilder().
               setId(id).build();
     } catch (IOException e) {
@@ -1189,8 +1190,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       throws ServiceException {
     try {
       server.modifyCacheDirective(
-          PBHelper.convert(request.getInfo()),
-          PBHelper.convertCacheFlags(request.getCacheFlags()));
+          PBHelperClient.convert(request.getInfo()),
+          PBHelperClient.convertCacheFlags(request.getCacheFlags()));
       return ModifyCacheDirectiveResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1217,14 +1218,14 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
           throws ServiceException {
     try {
       CacheDirectiveInfo filter =
-          PBHelper.convert(request.getFilter());
+          PBHelperClient.convert(request.getFilter());
       BatchedEntries<CacheDirectiveEntry> entries =
         server.listCacheDirectives(request.getPrevId(), filter);
       ListCacheDirectivesResponseProto.Builder builder =
           ListCacheDirectivesResponseProto.newBuilder();
       builder.setHasMore(entries.hasMore());
       for (int i=0, n=entries.size(); i<n; i++) {
-        builder.addElements(PBHelper.convert(entries.get(i)));
+        builder.addElements(PBHelperClient.convert(entries.get(i)));
       }
       return builder.build();
     } catch (IOException e) {
@@ -1236,7 +1237,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public AddCachePoolResponseProto addCachePool(RpcController controller,
       AddCachePoolRequestProto request) throws ServiceException {
     try {
-      server.addCachePool(PBHelper.convert(request.getInfo()));
+      server.addCachePool(PBHelperClient.convert(request.getInfo()));
       return AddCachePoolResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1247,7 +1248,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public ModifyCachePoolResponseProto modifyCachePool(RpcController controller,
       ModifyCachePoolRequestProto request) throws ServiceException {
     try {
-      server.modifyCachePool(PBHelper.convert(request.getInfo()));
+      server.modifyCachePool(PBHelperClient.convert(request.getInfo()));
       return ModifyCachePoolResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1275,7 +1276,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
         ListCachePoolsResponseProto.newBuilder();
       responseBuilder.setHasMore(entries.hasMore());
       for (int i=0, n=entries.size(); i<n; i++) {
-        responseBuilder.addEntries(PBHelper.convert(entries.get(i)));
+        responseBuilder.addEntries(PBHelperClient.convert(entries.get(i)));
       }
       return responseBuilder.build();
     } catch (IOException e) {
@@ -1288,7 +1289,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, ModifyAclEntriesRequestProto req)
       throws ServiceException {
     try {
-      server.modifyAclEntries(req.getSrc(), PBHelper.convertAclEntry(req.getAclSpecList()));
+      server.modifyAclEntries(req.getSrc(), PBHelperClient.convertAclEntry(req.getAclSpecList()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -1301,7 +1302,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       throws ServiceException {
     try {
       server.removeAclEntries(req.getSrc(),
-          PBHelper.convertAclEntry(req.getAclSpecList()));
+          PBHelperClient.convertAclEntry(req.getAclSpecList()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -1335,7 +1336,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public SetAclResponseProto setAcl(RpcController controller,
       SetAclRequestProto req) throws ServiceException {
     try {
-      server.setAcl(req.getSrc(), PBHelper.convertAclEntry(req.getAclSpecList()));
+      server.setAcl(req.getSrc(), PBHelperClient.convertAclEntry(req.getAclSpecList()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -1346,7 +1347,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public GetAclStatusResponseProto getAclStatus(RpcController controller,
       GetAclStatusRequestProto req) throws ServiceException {
     try {
-      return PBHelper.convert(server.getAclStatus(req.getSrc()));
+      return PBHelperClient.convert(server.getAclStatus(req.getSrc()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -1373,7 +1374,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
           GetEZForPathResponseProto.newBuilder();
       final EncryptionZone ret = server.getEZForPath(req.getSrc());
       if (ret != null) {
-        builder.setZone(PBHelper.convert(ret));
+        builder.setZone(PBHelperClient.convert(ret));
       }
       return builder.build();
     } catch (IOException e) {
@@ -1392,7 +1393,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
           ListEncryptionZonesResponseProto.newBuilder();
       builder.setHasMore(entries.hasMore());
       for (int i=0; i<entries.size(); i++) {
-        builder.addZones(PBHelper.convert(entries.get(i)));
+        builder.addZones(PBHelperClient.convert(entries.get(i)));
       }
       return builder.build();
     } catch (IOException e) {
@@ -1405,8 +1406,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, SetErasureCodingPolicyRequestProto req)
       throws ServiceException {
     try {
-      ErasureCodingPolicy ecPolicy = req.hasEcPolicy() ? PBHelper.convertErasureCodingPolicy(req
-          .getEcPolicy()) : null;
+      ErasureCodingPolicy ecPolicy = req.hasEcPolicy() ?
+          PBHelperClient.convertErasureCodingPolicy(req.getEcPolicy()) : null;
       server.setErasureCodingPolicy(req.getSrc(), ecPolicy);
       return SetErasureCodingPolicyResponseProto.newBuilder().build();
     } catch (IOException e) {
@@ -1418,8 +1419,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public SetXAttrResponseProto setXAttr(RpcController controller,
       SetXAttrRequestProto req) throws ServiceException {
     try {
-      server.setXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr()), 
-          PBHelper.convert(req.getFlag()));
+      server.setXAttr(req.getSrc(), PBHelperClient.convertXAttr(req.getXAttr()),
+          PBHelperClient.convert(req.getFlag()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -1430,8 +1431,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public GetXAttrsResponseProto getXAttrs(RpcController controller,
       GetXAttrsRequestProto req) throws ServiceException {
     try {
-      return PBHelper.convertXAttrsResponse(server.getXAttrs(req.getSrc(), 
-          PBHelper.convertXAttrs(req.getXAttrsList())));
+      return PBHelperClient.convertXAttrsResponse(server.getXAttrs(req.getSrc(),
+          PBHelperClient.convertXAttrs(req.getXAttrsList())));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -1441,7 +1442,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public ListXAttrsResponseProto listXAttrs(RpcController controller,
     ListXAttrsRequestProto req) throws ServiceException {
     try {
-      return PBHelper.convertListXAttrsResponse(server.listXAttrs(req.getSrc()));
+      return PBHelperClient.convertListXAttrsResponse(server.listXAttrs(req.getSrc()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -1451,7 +1452,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public RemoveXAttrResponseProto removeXAttr(RpcController controller,
       RemoveXAttrRequestProto req) throws ServiceException {
     try {
-      server.removeXAttr(req.getSrc(), PBHelper.convertXAttr(req.getXAttr()));
+      server.removeXAttr(req.getSrc(), PBHelperClient.convertXAttr(req.getXAttr()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -1462,7 +1463,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public CheckAccessResponseProto checkAccess(RpcController controller,
      CheckAccessRequestProto req) throws ServiceException {
     try {
-      server.checkAccess(req.getPath(), PBHelper.convert(req.getMode()));
+      server.checkAccess(req.getPath(), PBHelperClient.convert(req.getMode()));
     } catch (IOException e) {
       throw new ServiceException(e);
     }
@@ -1486,7 +1487,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       RpcController controller, GetStoragePolicyRequestProto request)
       throws ServiceException {
     try {
-      BlockStoragePolicyProto policy = PBHelper.convert(server
+      BlockStoragePolicyProto policy = PBHelperClient.convert(server
           .getStoragePolicy(request.getPath()));
       return GetStoragePolicyResponseProto.newBuilder()
           .setStoragePolicy(policy).build();
@@ -1507,7 +1508,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
         return builder.build();
       }
       for (BlockStoragePolicy policy : policies) {
-        builder.addPolicies(PBHelper.convert(policy));
+        builder.addPolicies(PBHelperClient.convert(policy));
       }
       return builder.build();
     } catch (IOException e) {
@@ -1529,7 +1530,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public GetEditsFromTxidResponseProto getEditsFromTxid(RpcController controller,
       GetEditsFromTxidRequestProto req) throws ServiceException {
     try {
-      return PBHelper.convertEditsResponse(server.getEditsFromTxid(
+      return PBHelperClient.convertEditsResponse(server.getEditsFromTxid(
          req.getTxid()));
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1544,7 +1545,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       GetErasureCodingPoliciesResponseProto.Builder resBuilder = GetErasureCodingPoliciesResponseProto
           .newBuilder();
       for (ErasureCodingPolicy ecPolicy : ecPolicies) {
-        resBuilder.addEcPolicies(PBHelper.convertErasureCodingPolicy(ecPolicy));
+        resBuilder.addEcPolicies(PBHelperClient.convertErasureCodingPolicy(ecPolicy));
       }
       return resBuilder.build();
     } catch (IOException e) {
@@ -1559,7 +1560,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
       ErasureCodingPolicy ecPolicy = server.getErasureCodingPolicy(request.getSrc());
       GetErasureCodingPolicyResponseProto.Builder builder = GetErasureCodingPolicyResponseProto.newBuilder();
       if (ecPolicy != null) {
-        builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(ecPolicy));
+        builder.setEcPolicy(PBHelperClient.convertErasureCodingPolicy(ecPolicy));
       }
       return builder.build();
     } catch (IOException e) {

+ 64 - 64
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -266,8 +266,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
     try {
       GetBlockLocationsResponseProto resp = rpcProxy.getBlockLocations(null,
           req);
-      return resp.hasLocations() ? 
-        PBHelper.convert(resp.getLocations()) : null;
+      return resp.hasLocations() ?
+        PBHelperClient.convert(resp.getLocations()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -277,7 +277,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   public FsServerDefaults getServerDefaults() throws IOException {
     GetServerDefaultsRequestProto req = VOID_GET_SERVER_DEFAULT_REQUEST;
     try {
-      return PBHelper
+      return PBHelperClient
          .convert(rpcProxy.getServerDefaults(null, req).getServerDefaults());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
@@ -296,17 +296,17 @@ public class ClientNamenodeProtocolTranslatorPB implements
       IOException {
     CreateRequestProto.Builder builder = CreateRequestProto.newBuilder()
         .setSrc(src)
-        .setMasked(PBHelper.convert(masked))
+        .setMasked(PBHelperClient.convert(masked))
         .setClientName(clientName)
-        .setCreateFlag(PBHelper.convertCreateFlag(flag))
+        .setCreateFlag(PBHelperClient.convertCreateFlag(flag))
         .setCreateParent(createParent)
         .setReplication(replication)
         .setBlockSize(blockSize);
-    builder.addAllCryptoProtocolVersion(PBHelper.convert(supportedVersions));
+    builder.addAllCryptoProtocolVersion(PBHelperClient.convert(supportedVersions));
     CreateRequestProto req = builder.build();
     try {
       CreateResponseProto res = rpcProxy.create(null, req);
-      return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
+      return res.hasFs() ? PBHelperClient.convert(res.getFs()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -334,13 +334,13 @@ public class ClientNamenodeProtocolTranslatorPB implements
       DSQuotaExceededException, FileNotFoundException, SafeModeException,
       UnresolvedLinkException, IOException {
     AppendRequestProto req = AppendRequestProto.newBuilder().setSrc(src)
-        .setClientName(clientName).setFlag(PBHelper.convertCreateFlag(flag))
+        .setClientName(clientName).setFlag(PBHelperClient.convertCreateFlag(flag))
         .build();
     try {
       AppendResponseProto res = rpcProxy.append(null, req);
-      LocatedBlock lastBlock = res.hasBlock() ? PBHelper
+      LocatedBlock lastBlock = res.hasBlock() ? PBHelperClient
           .convertLocatedBlockProto(res.getBlock()) : null;
-      HdfsFileStatus stat = (res.hasStat()) ? PBHelper.convert(res.getStat())
+      HdfsFileStatus stat = (res.hasStat()) ? PBHelperClient.convert(res.getStat())
           : null;
       return new LastBlockWithStatus(lastBlock, stat);
     } catch (ServiceException e) {
@@ -370,7 +370,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       UnresolvedLinkException, IOException {
     SetPermissionRequestProto req = SetPermissionRequestProto.newBuilder()
         .setSrc(src)
-        .setPermission(PBHelper.convert(permission))
+        .setPermission(PBHelperClient.convert(permission))
         .build();
     try {
       rpcProxy.setPermission(null, req);
@@ -427,7 +427,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       req.addAllFavoredNodes(Arrays.asList(favoredNodes));
     }
     try {
-      return PBHelper.convertLocatedBlockProto(
+      return PBHelperClient.convertLocatedBlockProto(
          rpcProxy.addBlock(null, req.build()).getBlock());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
@@ -453,7 +453,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .setClientName(clientName)
         .build();
     try {
-      return PBHelper.convertLocatedBlockProto(
+      return PBHelperClient.convertLocatedBlockProto(
          rpcProxy.getAdditionalDatanode(null, req).getBlock());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
@@ -481,7 +481,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   @Override
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
     ReportBadBlocksRequestProto req = ReportBadBlocksRequestProto.newBuilder()
-        .addAllBlocks(Arrays.asList(PBHelper.convertLocatedBlocks(blocks)))
+        .addAllBlocks(Arrays.asList(PBHelperClient.convertLocatedBlocks(blocks)))
         .build();
     try {
       rpcProxy.reportBadBlocks(null, req);
@@ -564,7 +564,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       IOException {
     MkdirsRequestProto req = MkdirsRequestProto.newBuilder()
         .setSrc(src)
-        .setMasked(PBHelper.convert(masked))
+        .setMasked(PBHelperClient.convert(masked))
         .setCreateParent(createParent).build();
 
     try {
@@ -586,7 +586,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       GetListingResponseProto result = rpcProxy.getListing(null, req);
       
       if (result.hasDirList()) {
-        return PBHelper.convert(result.getDirList());
+        return PBHelperClient.convert(result.getDirList());
       }
       return null;
     } catch (ServiceException e) {
@@ -622,7 +622,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   @Override
   public long[] getStats() throws IOException {
     try {
-      return PBHelper.convert(rpcProxy.getFsStats(null,
+      return PBHelperClient.convert(rpcProxy.getFsStats(null,
          VOID_GET_FSSTATUS_REQUEST));
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
@@ -634,9 +634,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throws IOException {
     GetDatanodeReportRequestProto req = GetDatanodeReportRequestProto
         .newBuilder()
-        .setType(PBHelper.convert(type)).build();
+        .setType(PBHelperClient.convert(type)).build();
     try {
-      return PBHelper.convert(
+      return PBHelperClient.convert(
          rpcProxy.getDatanodeReport(null, req).getDiList());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
@@ -648,9 +648,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throws IOException {
     final GetDatanodeStorageReportRequestProto req
         = GetDatanodeStorageReportRequestProto.newBuilder()
-            .setType(PBHelper.convert(type)).build();
+            .setType(PBHelperClient.convert(type)).build();
     try {
-      return PBHelper.convertDatanodeStorageReports(
+      return PBHelperClient.convertDatanodeStorageReports(
          rpcProxy.getDatanodeStorageReport(null, req).getDatanodeStorageReportsList());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
@@ -674,7 +674,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   @Override
   public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException {
     SetSafeModeRequestProto req = SetSafeModeRequestProto.newBuilder()
-        .setAction(PBHelper.convert(action)).setChecked(isChecked).build();
+        .setAction(PBHelperClient.convert(action)).setChecked(isChecked).build();
     try {
       return rpcProxy.setSafeMode(null, req).getResult();
     } catch (ServiceException e) {
@@ -738,11 +738,11 @@ public class ClientNamenodeProtocolTranslatorPB implements
   @Override
   public RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
     final RollingUpgradeRequestProto r = RollingUpgradeRequestProto.newBuilder()
-        .setAction(PBHelper.convert(action)).build();
+        .setAction(PBHelperClient.convert(action)).build();
     try {
       final RollingUpgradeResponseProto proto = rpcProxy.rollingUpgrade(null, r);
       if (proto.hasRollingUpgradeInfo()) {
-        return PBHelper.convert(proto.getRollingUpgradeInfo());
+        return PBHelperClient.convert(proto.getRollingUpgradeInfo());
       }
       return null;
     } catch (ServiceException e) {
@@ -758,7 +758,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     if (cookie != null) 
       req.setCookie(cookie);
     try {
-      return PBHelper.convert(
+      return PBHelperClient.convert(
          rpcProxy.listCorruptFileBlocks(null, req.build()).getCorrupt());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
@@ -784,7 +784,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .setSrc(src).build();
     try {
       GetFileInfoResponseProto res = rpcProxy.getFileInfo(null, req);
-      return res.hasFs() ? PBHelper.convert(res.getFs()) : null;
+      return res.hasFs() ? PBHelperClient.convert(res.getFs()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -798,7 +798,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     try {
       GetFileLinkInfoResponseProto result = rpcProxy.getFileLinkInfo(null, req);
       return result.hasFs() ?  
-          PBHelper.convert(rpcProxy.getFileLinkInfo(null, req).getFs()) : null;
+          PBHelperClient.convert(rpcProxy.getFileLinkInfo(null, req).getFs()) : null;
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -813,7 +813,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .setPath(path)
         .build();
     try {
-      return PBHelper.convert(rpcProxy.getContentSummary(null, req)
+      return PBHelperClient.convert(rpcProxy.getContentSummary(null, req)
          .getSummary());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
@@ -881,7 +881,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     CreateSymlinkRequestProto req = CreateSymlinkRequestProto.newBuilder()
         .setTarget(target)
         .setLink(link)
-        .setDirPerm(PBHelper.convert(dirPerm))
+        .setDirPerm(PBHelperClient.convert(dirPerm))
         .setCreateParent(createParent)
         .build();
     try {
@@ -913,7 +913,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .setClientName(clientName)
         .build();
     try {
-      return PBHelper.convertLocatedBlockProto(
+      return PBHelperClient.convertLocatedBlockProto(
          rpcProxy.updateBlockForPipeline(null, req).getBlock());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
@@ -927,7 +927,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .setClientName(clientName)
         .setOldBlock(PBHelperClient.convert(oldBlock))
        .setNewBlock(PBHelperClient.convert(newBlock))
-        .addAllNewNodes(Arrays.asList(PBHelper.convert(newNodes)))
+        .addAllNewNodes(Arrays.asList(PBHelperClient.convert(newNodes)))
        .addAllStorageIDs(storageIDs == null ? null : Arrays.asList(storageIDs))
        .build();
     try {
@@ -946,7 +946,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         .build();
     try {
       GetDelegationTokenResponseProto resp = rpcProxy.getDelegationToken(null, req);
       GetDelegationTokenResponseProto resp = rpcProxy.getDelegationToken(null, req);
-      return resp.hasToken() ? PBHelper.convertDelegationToken(resp.getToken())
+      return resp.hasToken() ? PBHelperClient.convertDelegationToken(resp.getToken())
           : null;
           : null;
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
@@ -1005,7 +1005,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       GetDataEncryptionKeyResponseProto rsp = rpcProxy.getDataEncryptionKey(
       GetDataEncryptionKeyResponseProto rsp = rpcProxy.getDataEncryptionKey(
           null, VOID_GET_DATA_ENCRYPTIONKEY_REQUEST);
           null, VOID_GET_DATA_ENCRYPTIONKEY_REQUEST);
      return rsp.hasDataEncryptionKey() ? 
      return rsp.hasDataEncryptionKey() ? 
-          PBHelper.convert(rsp.getDataEncryptionKey()) : null;
+          PBHelperClient.convert(rsp.getDataEncryptionKey()) : null;
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -1102,7 +1102,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
           .getSnapshottableDirListing(null, req);
           .getSnapshottableDirListing(null, req);
       
       
       if (result.hasSnapshottableDirList()) {
       if (result.hasSnapshottableDirList()) {
-        return PBHelper.convert(result.getSnapshottableDirList());
+        return PBHelperClient.convert(result.getSnapshottableDirList());
       }
       }
       return null;
       return null;
     } catch (ServiceException e) {
     } catch (ServiceException e) {
@@ -1120,7 +1120,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       GetSnapshotDiffReportResponseProto result = 
       GetSnapshotDiffReportResponseProto result = 
           rpcProxy.getSnapshotDiffReport(null, req);
           rpcProxy.getSnapshotDiffReport(null, req);
     
     
-      return PBHelper.convert(result.getDiffReport());
+      return PBHelperClient.convert(result.getDiffReport());
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -1132,9 +1132,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
     try {
     try {
       AddCacheDirectiveRequestProto.Builder builder =
       AddCacheDirectiveRequestProto.Builder builder =
           AddCacheDirectiveRequestProto.newBuilder().
           AddCacheDirectiveRequestProto.newBuilder().
-              setInfo(PBHelper.convert(directive));
+              setInfo(PBHelperClient.convert(directive));
       if (!flags.isEmpty()) {
       if (!flags.isEmpty()) {
-        builder.setCacheFlags(PBHelper.convertCacheFlags(flags));
+        builder.setCacheFlags(PBHelperClient.convertCacheFlags(flags));
       }
       }
       return rpcProxy.addCacheDirective(null, builder.build()).getId();
       return rpcProxy.addCacheDirective(null, builder.build()).getId();
     } catch (ServiceException e) {
     } catch (ServiceException e) {
@@ -1148,9 +1148,9 @@ public class ClientNamenodeProtocolTranslatorPB implements
     try {
     try {
       ModifyCacheDirectiveRequestProto.Builder builder =
       ModifyCacheDirectiveRequestProto.Builder builder =
           ModifyCacheDirectiveRequestProto.newBuilder().
           ModifyCacheDirectiveRequestProto.newBuilder().
-              setInfo(PBHelper.convert(directive));
+              setInfo(PBHelperClient.convert(directive));
       if (!flags.isEmpty()) {
       if (!flags.isEmpty()) {
-        builder.setCacheFlags(PBHelper.convertCacheFlags(flags));
+        builder.setCacheFlags(PBHelperClient.convertCacheFlags(flags));
       }
       }
       rpcProxy.modifyCacheDirective(null, builder.build());
       rpcProxy.modifyCacheDirective(null, builder.build());
     } catch (ServiceException e) {
     } catch (ServiceException e) {
@@ -1181,7 +1181,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
 
 
     @Override
     @Override
     public CacheDirectiveEntry get(int i) {
     public CacheDirectiveEntry get(int i) {
-      return PBHelper.convert(response.getElements(i));
+      return PBHelperClient.convert(response.getElements(i));
     }
     }
 
 
     @Override
     @Override
@@ -1207,7 +1207,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         rpcProxy.listCacheDirectives(null,
         rpcProxy.listCacheDirectives(null,
           ListCacheDirectivesRequestProto.newBuilder().
           ListCacheDirectivesRequestProto.newBuilder().
             setPrevId(prevId).
             setPrevId(prevId).
-            setFilter(PBHelper.convert(filter)).
+            setFilter(PBHelperClient.convert(filter)).
             build()));
             build()));
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
@@ -1218,7 +1218,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   public void addCachePool(CachePoolInfo info) throws IOException {
   public void addCachePool(CachePoolInfo info) throws IOException {
     AddCachePoolRequestProto.Builder builder = 
     AddCachePoolRequestProto.Builder builder = 
         AddCachePoolRequestProto.newBuilder();
         AddCachePoolRequestProto.newBuilder();
-    builder.setInfo(PBHelper.convert(info));
+    builder.setInfo(PBHelperClient.convert(info));
     try {
     try {
       rpcProxy.addCachePool(null, builder.build());
       rpcProxy.addCachePool(null, builder.build());
     } catch (ServiceException e) {
     } catch (ServiceException e) {
@@ -1230,7 +1230,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   public void modifyCachePool(CachePoolInfo req) throws IOException {
   public void modifyCachePool(CachePoolInfo req) throws IOException {
     ModifyCachePoolRequestProto.Builder builder = 
     ModifyCachePoolRequestProto.Builder builder = 
         ModifyCachePoolRequestProto.newBuilder();
         ModifyCachePoolRequestProto.newBuilder();
-    builder.setInfo(PBHelper.convert(req));
+    builder.setInfo(PBHelperClient.convert(req));
     try {
     try {
       rpcProxy.modifyCachePool(null, builder.build());
       rpcProxy.modifyCachePool(null, builder.build());
     } catch (ServiceException e) {
     } catch (ServiceException e) {
@@ -1260,7 +1260,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     @Override
     @Override
     public CachePoolEntry get(int i) {
     public CachePoolEntry get(int i) {
       CachePoolEntryProto elem = proto.getEntries(i);
       CachePoolEntryProto elem = proto.getEntries(i);
-      return PBHelper.convert(elem);
+      return PBHelperClient.convert(elem);
     }
     }
 
 
     @Override
     @Override
@@ -1292,7 +1292,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throws IOException {
       throws IOException {
     ModifyAclEntriesRequestProto req = ModifyAclEntriesRequestProto
     ModifyAclEntriesRequestProto req = ModifyAclEntriesRequestProto
         .newBuilder().setSrc(src)
         .newBuilder().setSrc(src)
-        .addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec)).build();
+        .addAllAclSpec(PBHelperClient.convertAclEntryProto(aclSpec)).build();
     try {
     try {
       rpcProxy.modifyAclEntries(null, req);
       rpcProxy.modifyAclEntries(null, req);
     } catch (ServiceException e) {
     } catch (ServiceException e) {
@@ -1305,7 +1305,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throws IOException {
       throws IOException {
     RemoveAclEntriesRequestProto req = RemoveAclEntriesRequestProto
     RemoveAclEntriesRequestProto req = RemoveAclEntriesRequestProto
         .newBuilder().setSrc(src)
         .newBuilder().setSrc(src)
-        .addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec)).build();
+        .addAllAclSpec(PBHelperClient.convertAclEntryProto(aclSpec)).build();
     try {
     try {
       rpcProxy.removeAclEntries(null, req);
       rpcProxy.removeAclEntries(null, req);
     } catch (ServiceException e) {
     } catch (ServiceException e) {
@@ -1339,7 +1339,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
   public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
     SetAclRequestProto req = SetAclRequestProto.newBuilder()
     SetAclRequestProto req = SetAclRequestProto.newBuilder()
         .setSrc(src)
         .setSrc(src)
-        .addAllAclSpec(PBHelper.convertAclEntryProto(aclSpec))
+        .addAllAclSpec(PBHelperClient.convertAclEntryProto(aclSpec))
         .build();
         .build();
     try {
     try {
       rpcProxy.setAcl(null, req);
       rpcProxy.setAcl(null, req);
@@ -1353,7 +1353,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     GetAclStatusRequestProto req = GetAclStatusRequestProto.newBuilder()
     GetAclStatusRequestProto req = GetAclStatusRequestProto.newBuilder()
         .setSrc(src).build();
         .setSrc(src).build();
     try {
     try {
-      return PBHelper.convert(rpcProxy.getAclStatus(null, req));
+      return PBHelperClient.convert(rpcProxy.getAclStatus(null, req));
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -1387,7 +1387,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       final EncryptionZonesProtos.GetEZForPathResponseProto response =
       final EncryptionZonesProtos.GetEZForPathResponseProto response =
           rpcProxy.getEZForPath(null, req);
           rpcProxy.getEZForPath(null, req);
       if (response.hasZone()) {
       if (response.hasZone()) {
-        return PBHelper.convert(response.getZone());
+        return PBHelperClient.convert(response.getZone());
       } else {
       } else {
         return null;
         return null;
       }
       }
@@ -1409,7 +1409,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       List<EncryptionZone> elements =
       List<EncryptionZone> elements =
           Lists.newArrayListWithCapacity(response.getZonesCount());
           Lists.newArrayListWithCapacity(response.getZonesCount());
       for (EncryptionZoneProto p : response.getZonesList()) {
       for (EncryptionZoneProto p : response.getZonesList()) {
-        elements.add(PBHelper.convert(p));
+        elements.add(PBHelperClient.convert(p));
       }
       }
       return new BatchedListEntries<EncryptionZone>(elements,
       return new BatchedListEntries<EncryptionZone>(elements,
           response.getHasMore());
           response.getHasMore());
@@ -1425,7 +1425,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
         SetErasureCodingPolicyRequestProto.newBuilder();
         SetErasureCodingPolicyRequestProto.newBuilder();
     builder.setSrc(src);
     builder.setSrc(src);
     if (ecPolicy != null) {
     if (ecPolicy != null) {
-      builder.setEcPolicy(PBHelper.convertErasureCodingPolicy(ecPolicy));
+      builder.setEcPolicy(PBHelperClient.convertErasureCodingPolicy(ecPolicy));
     }
     }
     SetErasureCodingPolicyRequestProto req = builder.build();
     SetErasureCodingPolicyRequestProto req = builder.build();
     try {
     try {
@@ -1440,8 +1440,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
       throws IOException {
       throws IOException {
     SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder()
     SetXAttrRequestProto req = SetXAttrRequestProto.newBuilder()
         .setSrc(src)
         .setSrc(src)
-        .setXAttr(PBHelper.convertXAttrProto(xAttr))
-        .setFlag(PBHelper.convert(flag))
+        .setXAttr(PBHelperClient.convertXAttrProto(xAttr))
+        .setFlag(PBHelperClient.convert(flag))
         .build();
         .build();
     try {
     try {
       rpcProxy.setXAttr(null, req);
       rpcProxy.setXAttr(null, req);
@@ -1451,16 +1451,16 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }
   }
   
   
   @Override
   @Override
-  public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs) 
+  public List<XAttr> getXAttrs(String src, List<XAttr> xAttrs)
       throws IOException {
       throws IOException {
     GetXAttrsRequestProto.Builder builder = GetXAttrsRequestProto.newBuilder();
     GetXAttrsRequestProto.Builder builder = GetXAttrsRequestProto.newBuilder();
     builder.setSrc(src);
     builder.setSrc(src);
     if (xAttrs != null) {
     if (xAttrs != null) {
-      builder.addAllXAttrs(PBHelper.convertXAttrProto(xAttrs));
+      builder.addAllXAttrs(PBHelperClient.convertXAttrProto(xAttrs));
     }
     }
     GetXAttrsRequestProto req = builder.build();
     GetXAttrsRequestProto req = builder.build();
     try {
     try {
-      return PBHelper.convert(rpcProxy.getXAttrs(null, req));
+      return PBHelperClient.convert(rpcProxy.getXAttrs(null, req));
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -1473,7 +1473,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     builder.setSrc(src);
     builder.setSrc(src);
     ListXAttrsRequestProto req = builder.build();
     ListXAttrsRequestProto req = builder.build();
     try {
     try {
-      return PBHelper.convert(rpcProxy.listXAttrs(null, req));
+      return PBHelperClient.convert(rpcProxy.listXAttrs(null, req));
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -1483,7 +1483,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   public void removeXAttr(String src, XAttr xAttr) throws IOException {
   public void removeXAttr(String src, XAttr xAttr) throws IOException {
     RemoveXAttrRequestProto req = RemoveXAttrRequestProto
     RemoveXAttrRequestProto req = RemoveXAttrRequestProto
         .newBuilder().setSrc(src)
         .newBuilder().setSrc(src)
-        .setXAttr(PBHelper.convertXAttrProto(xAttr)).build();
+        .setXAttr(PBHelperClient.convertXAttrProto(xAttr)).build();
     try {
     try {
       rpcProxy.removeXAttr(null, req);
       rpcProxy.removeXAttr(null, req);
     } catch (ServiceException e) {
     } catch (ServiceException e) {
@@ -1494,7 +1494,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
   @Override
   @Override
   public void checkAccess(String path, FsAction mode) throws IOException {
   public void checkAccess(String path, FsAction mode) throws IOException {
     CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder()
     CheckAccessRequestProto req = CheckAccessRequestProto.newBuilder()
-        .setPath(path).setMode(PBHelper.convert(mode)).build();
+        .setPath(path).setMode(PBHelperClient.convert(mode)).build();
     try {
     try {
       rpcProxy.checkAccess(null, req);
       rpcProxy.checkAccess(null, req);
     } catch (ServiceException e) {
     } catch (ServiceException e) {
@@ -1519,7 +1519,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     GetStoragePolicyRequestProto request = GetStoragePolicyRequestProto
     GetStoragePolicyRequestProto request = GetStoragePolicyRequestProto
         .newBuilder().setPath(path).build();
         .newBuilder().setPath(path).build();
     try {
     try {
-      return PBHelper.convert(rpcProxy.getStoragePolicy(null, request)
+      return PBHelperClient.convert(rpcProxy.getStoragePolicy(null, request)
           .getStoragePolicy());
           .getStoragePolicy());
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
@@ -1531,7 +1531,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     try {
     try {
       GetStoragePoliciesResponseProto response = rpcProxy
       GetStoragePoliciesResponseProto response = rpcProxy
           .getStoragePolicies(null, VOID_GET_STORAGE_POLICIES_REQUEST);
           .getStoragePolicies(null, VOID_GET_STORAGE_POLICIES_REQUEST);
-      return PBHelper.convertStoragePolicies(response.getPoliciesList());
+      return PBHelperClient.convertStoragePolicies(response.getPoliciesList());
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -1552,7 +1552,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     GetEditsFromTxidRequestProto req = GetEditsFromTxidRequestProto.newBuilder()
     GetEditsFromTxidRequestProto req = GetEditsFromTxidRequestProto.newBuilder()
         .setTxid(txid).build();
         .setTxid(txid).build();
     try {
     try {
-      return PBHelper.convert(rpcProxy.getEditsFromTxid(null, req));
+      return PBHelperClient.convert(rpcProxy.getEditsFromTxid(null, req));
     } catch (ServiceException e) {
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
       throw ProtobufHelper.getRemoteException(e);
     }
     }
@@ -1567,7 +1567,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
           new ErasureCodingPolicy[response.getEcPoliciesCount()];
           new ErasureCodingPolicy[response.getEcPoliciesCount()];
       int i = 0;
       int i = 0;
       for (ErasureCodingPolicyProto ecPolicyProto : response.getEcPoliciesList()) {
       for (ErasureCodingPolicyProto ecPolicyProto : response.getEcPoliciesList()) {
-        ecPolicies[i++] = PBHelper.convertErasureCodingPolicy(ecPolicyProto);
+        ecPolicies[i++] = PBHelperClient.convertErasureCodingPolicy(ecPolicyProto);
       }
       }
       return ecPolicies;
       return ecPolicies;
     } catch (ServiceException e) {
     } catch (ServiceException e) {
@@ -1583,7 +1583,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       GetErasureCodingPolicyResponseProto response = rpcProxy.getErasureCodingPolicy(
       GetErasureCodingPolicyResponseProto response = rpcProxy.getErasureCodingPolicy(
           null, req);
           null, req);
       if (response.hasEcPolicy()) {
       if (response.hasEcPolicy()) {
-        return PBHelper.convertErasureCodingPolicy(response.getEcPolicy());
+        return PBHelperClient.convertErasureCodingPolicy(response.getEcPolicy());
       }
       }
       return null;
       return null;
     } catch (ServiceException e) {
     } catch (ServiceException e) {
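Every hunk above follows one mechanical substitution: the request is still assembled from the same protobuf builder and sent through rpcProxy, and only the static conversion helper moves from the server-side PBHelper to the client-side PBHelperClient. A minimal sketch of the resulting call-site shape, lifted from the getAclStatus hunk above (it assumes the surrounding translator class, its rpcProxy field, and the usual HDFS proto imports; it is not a complete compilable unit):

  // Post-patch pattern: build the request proto, call the proxy, and let
  // PBHelperClient convert the proto response back into an HDFS-side type.
  @Override
  public AclStatus getAclStatus(String src) throws IOException {
    GetAclStatusRequestProto req = GetAclStatusRequestProto.newBuilder()
        .setSrc(src).build();
    try {
      return PBHelperClient.convert(rpcProxy.getAclStatus(null, req));
    } catch (ServiceException e) {
      // ServiceException wraps the remote IOException; unwrap it for callers.
      throw ProtobufHelper.getRemoteException(e);
    }
  }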

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

@@ -139,7 +139,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
        .setXmitsInProgress(xmitsInProgress).setXceiverCount(xceiverCount)
        .setFailedVolumes(failedVolumes)
        .setRequestFullBlockReportLease(requestFullBlockReportLease);
-    builder.addAllReports(PBHelper.convertStorageReports(reports));
+    builder.addAllReports(PBHelperClient.convertStorageReports(reports));
    if (cacheCapacity != 0) {
      builder.setCacheCapacity(cacheCapacity);
    }
@@ -164,7 +164,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
    }
    RollingUpgradeStatus rollingUpdateStatus = null;
    if (resp.hasRollingUpgradeStatus()) {
-      rollingUpdateStatus = PBHelper.convert(resp.getRollingUpgradeStatus());
+      rollingUpdateStatus = PBHelperClient.convert(resp.getRollingUpgradeStatus());
    }
    return new HeartbeatResponse(cmds, PBHelper.convert(resp.getHaStatus()),
        rollingUpdateStatus, resp.getFullBlockReportLeaseId());
@@ -183,7 +183,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements

    for (StorageBlockReport r : reports) {
      StorageBlockReportProto.Builder reportBuilder = StorageBlockReportProto
-          .newBuilder().setStorage(PBHelper.convert(r.getStorage()));
+          .newBuilder().setStorage(PBHelperClient.convert(r.getStorage()));
      BlockListAsLongs blocks = r.getBlocks();
      if (useBlocksBuffer) {
        reportBuilder.setNumberOfBlocks(blocks.getNumberOfBlocks());
@@ -240,7 +240,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
      StorageReceivedDeletedBlocksProto.Builder repBuilder = 
          StorageReceivedDeletedBlocksProto.newBuilder();
      repBuilder.setStorageUuid(storageBlock.getStorage().getStorageID());  // Set for wire compatibility.
-      repBuilder.setStorage(PBHelper.convert(storageBlock.getStorage()));
+      repBuilder.setStorage(PBHelperClient.convert(storageBlock.getStorage()));
      for (ReceivedDeletedBlockInfo rdBlock : storageBlock.getBlocks()) {
        repBuilder.addBlocks(PBHelper.convert(rdBlock));
      }
@@ -281,7 +281,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements
    ReportBadBlocksRequestProto.Builder builder = ReportBadBlocksRequestProto
        .newBuilder();
    for (int i = 0; i < blocks.length; i++) {
-      builder.addBlocks(i, PBHelper.convertLocatedBlock(blocks[i]));
+      builder.addBlocks(i, PBHelperClient.convertLocatedBlock(blocks[i]));
    }
    ReportBadBlocksRequestProto req = builder.build();
    try {

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java

@@ -105,7 +105,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
      HeartbeatRequestProto request) throws ServiceException {
    HeartbeatResponse response;
    try {
-      final StorageReport[] report = PBHelper.convertStorageReports(
+      final StorageReport[] report = PBHelperClient.convertStorageReports(
          request.getReportsList());
      VolumeFailureSummary volumeFailureSummary =
          request.hasVolumeFailureSummary() ? PBHelper.convertVolumeFailureSummary(
@@ -132,7 +132,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
    RollingUpgradeStatus rollingUpdateStatus = response
        .getRollingUpdateStatus();
    if (rollingUpdateStatus != null) {
-      builder.setRollingUpgradeStatus(PBHelper
+      builder.setRollingUpgradeStatus(PBHelperClient
          .convertRollingUpgradeStatus(rollingUpdateStatus));
    }
    builder.setFullBlockReportLeaseId(response.getFullBlockReportLeaseId());
@@ -157,7 +157,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
      } else {
        blocks = BlockListAsLongs.decodeLongs(s.getBlocksList());
      }
-      report[index++] = new StorageBlockReport(PBHelper.convert(s.getStorage()),
+      report[index++] = new StorageBlockReport(PBHelperClient.convert(s.getStorage()),
          blocks);
    }
    try {
@@ -214,7 +214,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
      }
      if (sBlock.hasStorage()) {
        info[i] = new StorageReceivedDeletedBlocks(
-            PBHelper.convert(sBlock.getStorage()), rdBlocks);
+            PBHelperClient.convert(sBlock.getStorage()), rdBlocks);
      } else {
        info[i] = new StorageReceivedDeletedBlocks(sBlock.getStorageUuid(), rdBlocks);
      }
@@ -259,7 +259,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
    List<LocatedBlockProto> lbps = request.getBlocksList();
    LocatedBlock [] blocks = new LocatedBlock [lbps.size()];
    for(int i=0; i<lbps.size(); i++) {
-      blocks[i] = PBHelper.convertLocatedBlockProto(lbps.get(i));
+      blocks[i] = PBHelperClient.convertLocatedBlockProto(lbps.get(i));
    }
    try {
      impl.reportBadBlocks(blocks);
@@ -276,7 +276,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements
    List<DatanodeIDProto> dnprotos = request.getNewTaragetsList();
    DatanodeID[] dns = new DatanodeID[dnprotos.size()];
    for (int i = 0; i < dnprotos.size(); i++) {
-      dns[i] = PBHelper.convert(dnprotos.get(i));
+      dns[i] = PBHelperClient.convert(dnprotos.get(i));
    }
    final List<String> sidprotos = request.getNewTargetStoragesList();
    final String[] storageIDs = sidprotos.toArray(new String[sidprotos.size()]);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/InterDatanodeProtocolServerSideTranslatorPB.java

@@ -64,7 +64,7 @@ public class InterDatanodeProtocolServerSideTranslatorPB implements
    } else {
      return InitReplicaRecoveryResponseProto.newBuilder()
          .setReplicaFound(true)
-          .setBlock(PBHelper.convert(r))
+          .setBlock(PBHelperClient.convert(r))
          .setState(PBHelper.convert(r.getOriginalReplicaState())).build();
    }
  }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolTranslatorPB.java

@@ -67,7 +67,7 @@ public class JournalProtocolTranslatorPB implements ProtocolMetaInterface,
        .setEpoch(epoch)
        .setFirstTxnId(firstTxnId)
        .setNumTxns(numTxns)
-        .setRecords(PBHelper.getByteString(records))
+        .setRecords(PBHelperClient.getByteString(records))
        .build();
    try {
      rpcProxy.journal(NULL_CONTROLLER, req);

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java

@@ -22,7 +22,6 @@ import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
@@ -79,7 +78,7 @@ public class NamenodeProtocolServerSideTranslatorPB implements
  @Override
  public GetBlocksResponseProto getBlocks(RpcController unused,
      GetBlocksRequestProto request) throws ServiceException {
-    DatanodeInfo dnInfo = new DatanodeInfo(PBHelper.convert(request
+    DatanodeInfo dnInfo = new DatanodeInfo(PBHelperClient.convert(request
        .getDatanode()));
    BlocksWithLocations blocks;
    try {

File diff suppressed because it is too large
+ 35 - 1039
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java


+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
+import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos;
import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto;
@@ -161,7 +162,7 @@ public class QJournalProtocolTranslatorPB implements ProtocolMetaInterface,
        .setSegmentTxnId(segmentTxId)
        .setFirstTxnId(firstTxnId)
        .setNumTxns(numTxns)
-        .setRecords(PBHelper.getByteString(records))
+        .setRecords(PBHelperClient.getByteString(records))
        .build();
    try {
      rpcProxy.journal(NULL_CONTROLLER, req);

+ 29 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java

@@ -63,6 +63,7 @@ import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StopWatch;
+import org.apache.hadoop.util.Time;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
@@ -132,6 +133,8 @@ public class Journal implements Closeable {

  private final JournalMetrics metrics;

+  private long lastJournalTimestamp = 0;
+
  /**
   * Time threshold for sync calls, beyond which a warning should be logged to the console.
   */
@@ -151,7 +154,7 @@ public class Journal implements Closeable {

    EditLogFile latest = scanStorageForLatestEdits();
    if (latest != null) {
-      highestWrittenTxId = latest.getLastTxId();
+      updateHighestWrittenTxId(latest.getLastTxId());
    }
  }

@@ -189,7 +192,7 @@ public class Journal implements Closeable {

    while (!files.isEmpty()) {
      EditLogFile latestLog = files.remove(files.size() - 1);
-      latestLog.scanLog();
+      latestLog.scanLog(Long.MAX_VALUE, false);
      LOG.info("Latest log is " + latestLog);
      if (latestLog.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
        // the log contains no transactions
@@ -253,7 +256,11 @@ public class Journal implements Closeable {
  synchronized long getCommittedTxnIdForTests() throws IOException {
    return committedTxnId.get();
  }
-  
+
+  synchronized long getLastJournalTimestamp() {
+    return lastJournalTimestamp;
+  }
+
  synchronized long getCurrentLagTxns() throws IOException {
    long committed = committedTxnId.get();
    if (committed == 0) {
@@ -266,7 +273,17 @@ public class Journal implements Closeable {
  synchronized long getHighestWrittenTxId() {
    return highestWrittenTxId;
  }
-  
+
+  /**
+   * Update the highest Tx ID that has been written to the journal. Also update
+   * the {@link FileJournalManager#lastReadableTxId} of the underlying fjm.
+   * @param val The new value
+   */
+  private void updateHighestWrittenTxId(long val) {
+    highestWrittenTxId = val;
+    fjm.setLastReadableTxId(val);
+  }
+
  @VisibleForTesting
  JournalMetrics getMetricsForTests() {
    return metrics;
@@ -399,8 +416,9 @@ public class Journal implements Closeable {
    metrics.bytesWritten.incr(records.length);
    metrics.txnsWritten.incr(numTxns);

-    highestWrittenTxId = lastTxnId;
+    updateHighestWrittenTxId(lastTxnId);
    nextTxId = lastTxnId + 1;
+    lastJournalTimestamp = Time.now();
  }

  public void heartbeat(RequestInfo reqInfo) throws IOException {
@@ -524,7 +542,7 @@ public class Journal implements Closeable {
      // If it's in-progress, it should only contain one transaction,
      // because the "startLogSegment" transaction is written alone at the
      // start of each segment. 
-      existing.scanLog();
+      existing.scanLog(Long.MAX_VALUE, false);
      if (existing.getLastTxId() != existing.getFirstTxId()) {
        throw new IllegalStateException("The log file " +
            existing + " seems to contain valid transactions");
@@ -587,7 +605,7 @@ public class Journal implements Closeable {
      if (needsValidation) {
        LOG.info("Validating log segment " + elf.getFile() + " about to be " +
            "finalized");
-        elf.scanLog();
+        elf.scanLog(Long.MAX_VALUE, false);

        checkSync(elf.getLastTxId() == endTxId,
            "Trying to finalize in-progress log segment %s to end at " +
@@ -675,7 +693,7 @@ public class Journal implements Closeable {
      return null;
    }
    if (elf.isInProgress()) {
-      elf.scanLog();
+      elf.scanLog(Long.MAX_VALUE, false);
    }
    if (elf.getLastTxId() == HdfsServerConstants.INVALID_TXID) {
      LOG.info("Edit log file " + elf + " appears to be empty. " +
@@ -782,8 +800,8 @@ public class Journal implements Closeable {
            ": no current segment in place");

        // Update the highest txid for lag metrics
-        highestWrittenTxId = Math.max(segment.getEndTxId(),
-            highestWrittenTxId);
+        updateHighestWrittenTxId(Math.max(segment.getEndTxId(),
+            highestWrittenTxId));
      } else {
        LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) +
            ": old segment " + TextFormat.shortDebugString(currentSegment) +
@@ -812,7 +830,7 @@ public class Journal implements Closeable {
        // If we're shortening the log, update our highest txid
        // used for lag metrics.
        if (txnRange(currentSegment).containsLong(highestWrittenTxId)) {
-          highestWrittenTxId = segment.getEndTxId();
+          updateHighestWrittenTxId(segment.getEndTxId());
        }
      }
       syncedFile = syncLog(reqInfo, segment, fromUrl);
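The Journal changes above funnel every assignment of highestWrittenTxId through a single private setter so the FileJournalManager's lastReadableTxId can never drift from it, and journal() now also stamps the wall-clock time of the last successful write. A stripped-down model of that invariant (plain Java for illustration only; the names mirror the hunks, but this is not the real Journal class):

  // Toy model: one setter owns both views of the highest written txid, so a
  // reader of either field always observes the same value.
  class JournalTxidModel {
    private long highestWrittenTxId;
    private long lastReadableTxId;      // stands in for fjm.setLastReadableTxId(val)
    private long lastJournalTimestamp;  // 0 until the first successful write

    private void updateHighestWrittenTxId(long val) {
      highestWrittenTxId = val;
      lastReadableTxId = val;
    }

    void journal(long lastTxnId) {
      updateHighestWrittenTxId(lastTxnId);
      lastJournalTimestamp = System.currentTimeMillis(); // Time.now() in the patch
    }

    long getLastJournalTimestamp() {
      return lastJournalTimestamp;
    }
  }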

+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalMetrics.java

@@ -109,7 +109,12 @@ class JournalMetrics {
      return -1L;
    }
  }
-  
+
+  @Metric("The timestamp of last successfully written transaction")
+  public long getLastJournalTimestamp() {
+    return journal.getLastJournalTimestamp();
+  }
+
  void addSync(long us) {
    for (MutableQuantiles q : syncsQuantiles) {
       q.add(us);
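The new gauge is published the same way as the other JournalMetrics values: a no-argument getter annotated with @Metric, which the metrics2 system discovers by reflection when the source is registered. A hypothetical source showing just that pattern (the real class also carries the sync counters and quantiles shown above):

  // Illustrative metrics source: the annotated getter simply delegates to the
  // owning Journal, so the gauge is read lazily on each metrics snapshot.
  @Metrics(about = "Journal metrics", context = "dfs")
  class ExampleJournalMetrics {
    private final Journal journal;

    ExampleJournalMetrics(Journal journal) {
      this.journal = journal;
    }

    @Metric("The timestamp of last successfully written transaction")
    public long getLastJournalTimestamp() {
      return journal.getLastJournalTimestamp();
    }
  }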

+ 71 - 99
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -179,6 +179,8 @@ public class Balancer {
       + "\tExcludes the specified datanodes."
       + "\tExcludes the specified datanodes."
       + "\n\t[-include [-f <hosts-file> | <comma-separated list of hosts>]]"
       + "\n\t[-include [-f <hosts-file> | <comma-separated list of hosts>]]"
       + "\tIncludes only the specified datanodes."
       + "\tIncludes only the specified datanodes."
+      + "\n\t[-blockpools <comma-separated list of blockpool ids>]"
+      + "\tThe balancer will only run on blockpools included in this list."
       + "\n\t[-idleiterations <idleiterations>]"
       + "\n\t[-idleiterations <idleiterations>]"
       + "\tNumber of consecutive idle iterations (-1 for Infinite) before "
       + "\tNumber of consecutive idle iterations (-1 for Infinite) before "
       + "exit."
       + "exit."
@@ -243,7 +245,8 @@ public class Balancer {
    * namenode as a client and a secondary namenode and retry proxies
    * namenode as a client and a secondary namenode and retry proxies
    * when connection fails.
    * when connection fails.
    */
    */
-  Balancer(NameNodeConnector theblockpool, Parameters p, Configuration conf) {
+  Balancer(NameNodeConnector theblockpool, BalancerParameters p,
+      Configuration conf) {
     final long movedWinWidth = getLong(conf,
     final long movedWinWidth = getLong(conf,
         DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
         DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_KEY,
         DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
         DFSConfigKeys.DFS_BALANCER_MOVEDWINWIDTH_DEFAULT);
@@ -265,13 +268,15 @@ public class Balancer {
         DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_DEFAULT);
         DFSConfigKeys.DFS_BALANCER_GETBLOCKS_MIN_BLOCK_SIZE_DEFAULT);
 
 
     this.nnc = theblockpool;
     this.nnc = theblockpool;
-    this.dispatcher = new Dispatcher(theblockpool, p.includedNodes,
-        p.excludedNodes, movedWinWidth, moverThreads, dispatcherThreads,
-        maxConcurrentMovesPerNode, getBlocksSize, getBlocksMinBlockSize, conf);
-    this.threshold = p.threshold;
-    this.policy = p.policy;
-    this.sourceNodes = p.sourceNodes;
-    this.runDuringUpgrade = p.runDuringUpgrade;
+    this.dispatcher =
+        new Dispatcher(theblockpool, p.getIncludedNodes(),
+            p.getExcludedNodes(), movedWinWidth, moverThreads,
+            dispatcherThreads, maxConcurrentMovesPerNode, getBlocksSize,
+            getBlocksMinBlockSize, conf);
+    this.threshold = p.getThreshold();
+    this.policy = p.getBalancingPolicy();
+    this.sourceNodes = p.getSourceNodes();
+    this.runDuringUpgrade = p.getRunDuringUpgrade();
 
 
     this.maxSizeToMove = getLong(conf,
     this.maxSizeToMove = getLong(conf,
         DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY,
         DFSConfigKeys.DFS_BALANCER_MAX_SIZE_TO_MOVE_KEY,
@@ -629,7 +634,7 @@ public class Balancer {
    * for each namenode,
    * for each namenode,
    * execute a {@link Balancer} to work through all datanodes once.  
    * execute a {@link Balancer} to work through all datanodes once.  
    */
    */
-  static int run(Collection<URI> namenodes, final Parameters p,
+  static int run(Collection<URI> namenodes, final BalancerParameters p,
       Configuration conf) throws IOException, InterruptedException {
       Configuration conf) throws IOException, InterruptedException {
     final long sleeptime =
     final long sleeptime =
         conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
         conf.getLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
@@ -638,38 +643,44 @@ public class Balancer {
             DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
             DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT) * 1000;
     LOG.info("namenodes  = " + namenodes);
     LOG.info("namenodes  = " + namenodes);
     LOG.info("parameters = " + p);
     LOG.info("parameters = " + p);
-    LOG.info("included nodes = " + p.includedNodes);
-    LOG.info("excluded nodes = " + p.excludedNodes);
-    LOG.info("source nodes = " + p.sourceNodes);
-    
+    LOG.info("included nodes = " + p.getIncludedNodes());
+    LOG.info("excluded nodes = " + p.getExcludedNodes());
+    LOG.info("source nodes = " + p.getSourceNodes());
+
     System.out.println("Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved");
     System.out.println("Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved");
     
     
     List<NameNodeConnector> connectors = Collections.emptyList();
     List<NameNodeConnector> connectors = Collections.emptyList();
     try {
     try {
       connectors = NameNodeConnector.newNameNodeConnectors(namenodes, 
       connectors = NameNodeConnector.newNameNodeConnectors(namenodes, 
-            Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf, p.maxIdleIteration);
-    
+              Balancer.class.getSimpleName(), BALANCER_ID_PATH, conf,
+              p.getMaxIdleIteration());
+
       boolean done = false;
       boolean done = false;
       for(int iteration = 0; !done; iteration++) {
       for(int iteration = 0; !done; iteration++) {
         done = true;
         done = true;
         Collections.shuffle(connectors);
         Collections.shuffle(connectors);
         for(NameNodeConnector nnc : connectors) {
         for(NameNodeConnector nnc : connectors) {
-          final Balancer b = new Balancer(nnc, p, conf);
-          final Result r = b.runOneIteration();
-          r.print(iteration, System.out);
-
-          // clean all lists
-          b.resetData(conf);
-          if (r.exitStatus == ExitStatus.IN_PROGRESS) {
-            done = false;
-          } else if (r.exitStatus != ExitStatus.SUCCESS) {
-            //must be an error statue, return.
-            return r.exitStatus.getExitCode();
-          }
-        }
+          if (p.getBlockPools().size() == 0
+              || p.getBlockPools().contains(nnc.getBlockpoolID())) {
+            final Balancer b = new Balancer(nnc, p, conf);
+            final Result r = b.runOneIteration();
+            r.print(iteration, System.out);
+
+            // clean all lists
+            b.resetData(conf);
+            if (r.exitStatus == ExitStatus.IN_PROGRESS) {
+              done = false;
+            } else if (r.exitStatus != ExitStatus.SUCCESS) {
+              // must be an error statue, return.
+              return r.exitStatus.getExitCode();
+            }
 
 
-        if (!done) {
-          Thread.sleep(sleeptime);
+            if (!done) {
+              Thread.sleep(sleeptime);
+            }
+          } else {
+            LOG.info("Skipping blockpool " + nnc.getBlockpoolID());
+          }
         }
         }
       }
       }
     } finally {
     } finally {
@@ -700,58 +711,6 @@ public class Balancer {
     return time+" "+unit;
     return time+" "+unit;
   }
   }
 
 
-  static class Parameters {
-    static final Parameters DEFAULT = new Parameters(
-        BalancingPolicy.Node.INSTANCE, 10.0,
-        NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS,
-        Collections.<String>emptySet(), Collections.<String>emptySet(),
-        Collections.<String>emptySet(),
-        false);
-
-    final BalancingPolicy policy;
-    final double threshold;
-    final int maxIdleIteration;
-    /** Exclude the nodes in this set. */
-    final Set<String> excludedNodes;
-    /** If empty, include any node; otherwise, include only these nodes. */
-    final Set<String> includedNodes;
-    /** If empty, any node can be a source;
-     *  otherwise, use only these nodes as source nodes.
-     */
-    final Set<String> sourceNodes;
-    /**
-     * Whether to run the balancer during upgrade.
-     */
-    final boolean runDuringUpgrade;
-
-    Parameters(BalancingPolicy policy, double threshold, int maxIdleIteration,
-        Set<String> excludedNodes, Set<String> includedNodes,
-        Set<String> sourceNodes, boolean runDuringUpgrade) {
-      this.policy = policy;
-      this.threshold = threshold;
-      this.maxIdleIteration = maxIdleIteration;
-      this.excludedNodes = excludedNodes;
-      this.includedNodes = includedNodes;
-      this.sourceNodes = sourceNodes;
-      this.runDuringUpgrade = runDuringUpgrade;
-    }
-
-    @Override
-    public String toString() {
-      return String.format("%s.%s [%s,"
-              + " threshold = %s,"
-              + " max idle iteration = %s,"
-              + " #excluded nodes = %s,"
-              + " #included nodes = %s,"
-              + " #source nodes = %s,"
-              + " run during upgrade = %s]",
-          Balancer.class.getSimpleName(), getClass().getSimpleName(),
-          policy, threshold, maxIdleIteration,
-          excludedNodes.size(), includedNodes.size(), sourceNodes.size(),
-          runDuringUpgrade);
-    }
-  }
-
   static class Cli extends Configured implements Tool {
   static class Cli extends Configured implements Tool {
     /**
     /**
      * Parse arguments and then run Balancer.
      * Parse arguments and then run Balancer.
@@ -784,14 +743,10 @@ public class Balancer {
     }
     }
 
 
     /** parse command line arguments */
     /** parse command line arguments */
-    static Parameters parse(String[] args) {
-      BalancingPolicy policy = Parameters.DEFAULT.policy;
-      double threshold = Parameters.DEFAULT.threshold;
-      int maxIdleIteration = Parameters.DEFAULT.maxIdleIteration;
-      Set<String> excludedNodes = Parameters.DEFAULT.excludedNodes;
-      Set<String> includedNodes = Parameters.DEFAULT.includedNodes;
-      Set<String> sourceNodes = Parameters.DEFAULT.sourceNodes;
-      boolean runDuringUpgrade = Parameters.DEFAULT.runDuringUpgrade;
+    static BalancerParameters parse(String[] args) {
+      Set<String> excludedNodes = null;
+      Set<String> includedNodes = null;
+      BalancerParameters.Builder b = new BalancerParameters.Builder();
 
 
       if (args != null) {
       if (args != null) {
         try {
         try {
@@ -800,12 +755,13 @@ public class Balancer {
               checkArgument(++i < args.length,
               checkArgument(++i < args.length,
                 "Threshold value is missing: args = " + Arrays.toString(args));
                 "Threshold value is missing: args = " + Arrays.toString(args));
               try {
               try {
-                threshold = Double.parseDouble(args[i]);
+                double threshold = Double.parseDouble(args[i]);
                 if (threshold < 1 || threshold > 100) {
                 if (threshold < 1 || threshold > 100) {
                   throw new IllegalArgumentException(
                   throw new IllegalArgumentException(
                       "Number out of range: threshold = " + threshold);
                       "Number out of range: threshold = " + threshold);
                 }
                 }
                 LOG.info( "Using a threshold of " + threshold );
                 LOG.info( "Using a threshold of " + threshold );
+                b.setThreshold(threshold);
               } catch(IllegalArgumentException e) {
               } catch(IllegalArgumentException e) {
                 System.err.println(
                 System.err.println(
                     "Expecting a number in the range of [1.0, 100.0]: "
                     "Expecting a number in the range of [1.0, 100.0]: "
@@ -816,7 +772,7 @@ public class Balancer {
               checkArgument(++i < args.length,
               checkArgument(++i < args.length,
                 "Policy value is missing: args = " + Arrays.toString(args));
                 "Policy value is missing: args = " + Arrays.toString(args));
               try {
               try {
-                policy = BalancingPolicy.parse(args[i]);
+                b.setBalancingPolicy(BalancingPolicy.parse(args[i]));
               } catch(IllegalArgumentException e) {
               } catch(IllegalArgumentException e) {
                 System.err.println("Illegal policy name: " + args[i]);
                 System.err.println("Illegal policy name: " + args[i]);
                 throw e;
                 throw e;
@@ -824,20 +780,33 @@ public class Balancer {
             } else if ("-exclude".equalsIgnoreCase(args[i])) {
             } else if ("-exclude".equalsIgnoreCase(args[i])) {
               excludedNodes = new HashSet<>();
               excludedNodes = new HashSet<>();
               i = processHostList(args, i, "exclude", excludedNodes);
               i = processHostList(args, i, "exclude", excludedNodes);
+              b.setExcludedNodes(excludedNodes);
             } else if ("-include".equalsIgnoreCase(args[i])) {
             } else if ("-include".equalsIgnoreCase(args[i])) {
               includedNodes = new HashSet<>();
               includedNodes = new HashSet<>();
               i = processHostList(args, i, "include", includedNodes);
               i = processHostList(args, i, "include", includedNodes);
+              b.setIncludedNodes(includedNodes);
             } else if ("-source".equalsIgnoreCase(args[i])) {
             } else if ("-source".equalsIgnoreCase(args[i])) {
-              sourceNodes = new HashSet<>();
+              Set<String> sourceNodes = new HashSet<>();
               i = processHostList(args, i, "source", sourceNodes);
               i = processHostList(args, i, "source", sourceNodes);
+              b.setSourceNodes(sourceNodes);
+            } else if ("-blockpools".equalsIgnoreCase(args[i])) {
+              checkArgument(
+                  ++i < args.length,
+                  "blockpools value is missing: args = "
+                      + Arrays.toString(args));
+              Set<String> blockpools = parseBlockPoolList(args[i]);
+              LOG.info("Balancer will run on the following blockpools: "
+                  + blockpools.toString());
+              b.setBlockpools(blockpools);
             } else if ("-idleiterations".equalsIgnoreCase(args[i])) {
             } else if ("-idleiterations".equalsIgnoreCase(args[i])) {
               checkArgument(++i < args.length,
               checkArgument(++i < args.length,
                   "idleiterations value is missing: args = " + Arrays
                   "idleiterations value is missing: args = " + Arrays
                       .toString(args));
                       .toString(args));
-              maxIdleIteration = Integer.parseInt(args[i]);
+              int maxIdleIteration = Integer.parseInt(args[i]);
               LOG.info("Using a idleiterations of " + maxIdleIteration);
               LOG.info("Using a idleiterations of " + maxIdleIteration);
+              b.setMaxIdleIteration(maxIdleIteration);
             } else if ("-runDuringUpgrade".equalsIgnoreCase(args[i])) {
             } else if ("-runDuringUpgrade".equalsIgnoreCase(args[i])) {
-              runDuringUpgrade = true;
+              b.setRunDuringUpgrade(true);
               LOG.info("Will run the balancer even during an ongoing HDFS "
               LOG.info("Will run the balancer even during an ongoing HDFS "
                   + "upgrade. Most users will not want to run the balancer "
                   + "upgrade. Most users will not want to run the balancer "
                   + "during an upgrade since it will not affect used space "
                   + "during an upgrade since it will not affect used space "
@@ -847,16 +816,14 @@ public class Balancer {
                  + Arrays.toString(args));
            }
          }
-          checkArgument(excludedNodes.isEmpty() || includedNodes.isEmpty(),
+          checkArgument(excludedNodes == null || includedNodes == null,
               "-exclude and -include options cannot be specified together.");
               "-exclude and -include options cannot be specified together.");
         } catch(RuntimeException e) {
         } catch(RuntimeException e) {
           printUsage(System.err);
           printUsage(System.err);
           throw e;
           throw e;
         }
         }
       }
       }
-      
-      return new Parameters(policy, threshold, maxIdleIteration,
-          excludedNodes, includedNodes, sourceNodes, runDuringUpgrade);
+      return b.build();
    }

    private static int processHostList(String[] args, int i, String type,
@@ -883,6 +850,11 @@ public class Balancer {
      return i;
    }

+    private static Set<String> parseBlockPoolList(String string) {
+      String[] addrs = StringUtils.getTrimmedStrings(string);
+      return new HashSet<String>(Arrays.asList(addrs));
+    }
+
    private static void printUsage(PrintStream out) {
      out.println(USAGE + "\n");
    }
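
Note on the new -blockpools option added above: its value is a single comma-separated list, and parseBlockPoolList() defers to StringUtils.getTrimmedStrings() to split it and trim each entry before collecting the result into a set. A minimal sketch of the expected parsing behavior, using hypothetical block pool IDs:

    // e.g. hdfs balancer -blockpools "BP-1,BP-2, BP-3" (pool IDs are made up)
    String[] addrs = StringUtils.getTrimmedStrings("BP-1,BP-2, BP-3");
    Set<String> blockpools = new HashSet<String>(Arrays.asList(addrs));
    // blockpools -> {"BP-1", "BP-2", "BP-3"}: surrounding whitespace is trimmed
    // and repeated entries collapse because the result is a HashSet.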

+ 168 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/BalancerParameters.java

@@ -0,0 +1,168 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.balancer;
+
+import java.util.Collections;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+
+@InterfaceAudience.Private
+final class BalancerParameters {
+  private final BalancingPolicy policy;
+  private final double threshold;
+  private final int maxIdleIteration;
+  /** Exclude the nodes in this set. */
+  private final Set<String> excludedNodes;
+  /** If empty, include any node; otherwise, include only these nodes. */
+  private final Set<String> includedNodes;
+  /**
+   * If empty, any node can be a source; otherwise, use only these nodes as
+   * source nodes.
+   */
+  private final Set<String> sourceNodes;
+  /**
+   * A set of block pools to run the balancer on.
+   */
+  private final Set<String> blockpools;
+  /**
+   * Whether to run the balancer during upgrade.
+   */
+  private final boolean runDuringUpgrade;
+
+  static final BalancerParameters DEFAULT = new BalancerParameters();
+
+  private BalancerParameters() {
+    this(new Builder());
+  }
+
+  private BalancerParameters(Builder builder) {
+    this.policy = builder.policy;
+    this.threshold = builder.threshold;
+    this.maxIdleIteration = builder.maxIdleIteration;
+    this.excludedNodes = builder.excludedNodes;
+    this.includedNodes = builder.includedNodes;
+    this.sourceNodes = builder.sourceNodes;
+    this.blockpools = builder.blockpools;
+    this.runDuringUpgrade = builder.runDuringUpgrade;
+  }
+
+  BalancingPolicy getBalancingPolicy() {
+    return this.policy;
+  }
+
+  double getThreshold() {
+    return this.threshold;
+  }
+
+  int getMaxIdleIteration() {
+    return this.maxIdleIteration;
+  }
+
+  Set<String> getExcludedNodes() {
+    return this.excludedNodes;
+  }
+
+  Set<String> getIncludedNodes() {
+    return this.includedNodes;
+  }
+
+  Set<String> getSourceNodes() {
+    return this.sourceNodes;
+  }
+
+  Set<String> getBlockPools() {
+    return this.blockpools;
+  }
+
+  boolean getRunDuringUpgrade() {
+    return this.runDuringUpgrade;
+  }
+
+  @Override
+  public String toString() {
+    return String.format("%s.%s [%s," + " threshold = %s,"
+        + " max idle iteration = %s," + " #excluded nodes = %s,"
+        + " #included nodes = %s," + " #source nodes = %s,"
+        + " #blockpools = %s," + " run during upgrade = %s]",
+        Balancer.class.getSimpleName(), getClass().getSimpleName(), policy,
+        threshold, maxIdleIteration, excludedNodes.size(),
+        includedNodes.size(), sourceNodes.size(), blockpools.size(),
+        runDuringUpgrade);
+  }
+
+  static class Builder {
+    // Defaults
+    private BalancingPolicy policy = BalancingPolicy.Node.INSTANCE;
+    private double threshold = 10.0;
+    private int maxIdleIteration =
+        NameNodeConnector.DEFAULT_MAX_IDLE_ITERATIONS;
+    private Set<String> excludedNodes = Collections.<String> emptySet();
+    private Set<String> includedNodes = Collections.<String> emptySet();
+    private Set<String> sourceNodes = Collections.<String> emptySet();
+    private Set<String> blockpools = Collections.<String> emptySet();
+    private boolean runDuringUpgrade = false;
+
+    Builder() {
+    }
+
+    Builder setBalancingPolicy(BalancingPolicy p) {
+      this.policy = p;
+      return this;
+    }
+
+    Builder setThreshold(double t) {
+      this.threshold = t;
+      return this;
+    }
+
+    Builder setMaxIdleIteration(int m) {
+      this.maxIdleIteration = m;
+      return this;
+    }
+
+    Builder setExcludedNodes(Set<String> nodes) {
+      this.excludedNodes = nodes;
+      return this;
+    }
+
+    Builder setIncludedNodes(Set<String> nodes) {
+      this.includedNodes = nodes;
+      return this;
+    }
+
+    Builder setSourceNodes(Set<String> nodes) {
+      this.sourceNodes = nodes;
+      return this;
+    }
+
+    Builder setBlockpools(Set<String> pools) {
+      this.blockpools = pools;
+      return this;
+    }
+
+    Builder setRunDuringUpgrade(boolean run) {
+      this.runDuringUpgrade = run;
+      return this;
+    }
+
+    BalancerParameters build() {
+      return new BalancerParameters(this);
+    }
+  }
+}
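
For context, BalancerParameters replaces the removed Balancer.Parameters constructor with a builder, so callers set only the options they need and everything else keeps the defaults declared in Builder (threshold 10.0, empty node and blockpool sets, runDuringUpgrade false). A rough sketch of the intended call pattern, with hypothetical option values:

    // Roughly what Cli.parse() builds for "-threshold 20 -runDuringUpgrade"
    BalancerParameters p = new BalancerParameters.Builder()
        .setThreshold(20.0)
        .setRunDuringUpgrade(true)
        .build();
    // Unset fields keep the Builder defaults; BalancerParameters.DEFAULT is the
    // all-defaults instance, equivalent to new BalancerParameters.Builder().build().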

Some files were not shown because too many files changed in this diff