
Merged latest changes from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5442@1603355 13f79535-47bb-0310-9956-ffa450edef68
Vinayakumar B 11 years ago
parent
commit
418e91197a
100 changed files with 5516 additions and 951 deletions
  1. + 1 - 1  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
  2. + 61 - 19  hadoop-common-project/hadoop-common/CHANGES.txt
  3. + 4 - 0  hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  4. + 1 - 0  hadoop-common-project/hadoop-common/pom.xml
  5. + 0 - 2  hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
  6. + 0 - 2  hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml
  7. + 0 - 2  hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
  8. + 658 - 0  hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html
  9. + 3 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  10. + 27 - 7  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
  11. + 46 - 14  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
  12. + 24 - 24  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java
  13. + 14 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java
  14. + 10 - 10  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java
  15. + 83 - 3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java
  16. + 50 - 24  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
  17. + 6 - 6  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java
  18. + 6 - 6  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
  19. + 12 - 12  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java
  20. + 10 - 10  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java
  21. + 1 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java
  22. + 3 - 3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java
  23. + 4 - 4  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java
  24. + 2 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java
  25. + 1 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java
  26. + 1 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java
  27. + 2 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java
  28. + 9 - 9  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java
  29. + 4 - 2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
  30. + 7 - 6  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
  31. + 1 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java
  32. + 1 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java
  33. + 34 - 5  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
  34. + 1 - 3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
  35. + 49 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/GenericRefreshProtocol.java
  36. + 35 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshHandler.java
  37. + 134 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java
  38. + 78 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshResponse.java
  39. + 1 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  40. + 119 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolClientSideTranslatorPB.java
  41. + 37 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolPB.java
  42. + 84 - 0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolServerSideTranslatorPB.java
  43. + 7 - 4  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
  44. + 9 - 13  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
  45. + 13 - 1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  46. + 6 - 6  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java
  47. + 18 - 13  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java
  48. + 4 - 3  hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
  49. + 61 - 0  hadoop-common-project/hadoop-common/src/main/proto/GenericRefreshProtocol.proto
  50. + 7 - 1  hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm
  51. + 732 - 0  hadoop-common-project/hadoop-common/src/site/apt/Metrics.apt.vm
  52. + 65 - 2  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java
  53. + 14 - 0  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java
  54. + 221 - 125  hadoop-common-project/hadoop-common/src/test/resources/testConf.xml
  55. + 128 - 1  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
  56. + 385 - 55  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
  57. + 35 - 0  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
  58. + 55 - 0  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
  59. + 113 - 1  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
  60. + 213 - 9  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java
  61. + 283 - 0  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java
  62. + 2 - 0  hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java
  63. + 112 - 67  hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  64. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/pom.xml
  65. + 5 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java
  66. + 14 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
  67. + 22 - 3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  68. + 59 - 300  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  69. + 289 - 42  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  70. + 17 - 6  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
  71. + 20 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  72. + 2 - 0  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java
  73. + 2 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
  74. + 78 - 3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
  75. + 227 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java
  76. + 241 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
  77. + 65 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
  78. + 6 - 5  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java
  79. + 31 - 7  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java
  80. + 11 - 6  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
  81. + 0 - 2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
  82. + 149 - 0  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
  83. + 41 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
  84. + 13 - 28  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
  85. + 10 - 3  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
  86. + 47 - 34  hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
  87. + 12 - 4  hadoop-mapreduce-project/CHANGES.txt
  88. + 9 - 0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
  89. + 9 - 0  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java
  90. + 2 - 2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
  91. + 2 - 2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
  92. + 1 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java
  93. + 21 - 2  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/WrappedRecordReader.java
  94. + 1 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java
  95. + 1 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/WrappedRecordReader.java
  96. + 1 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm
  97. + 0 - 2  hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
  98. + 0 - 2  hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java
  99. + 0 - 2  hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java
  100. + 0 - 2  hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java

+ 1 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -193,7 +193,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       
       for (String spnegoPrincipal : spnegoPrincipals) {
         LOG.info("Login using keytab {}, for principal {}",
-            keytab, principal);
+            keytab, spnegoPrincipal);
         final KerberosConfiguration kerberosConfiguration =
             new KerberosConfiguration(keytab, spnegoPrincipal);
         final LoginContext loginContext =
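
For context, the one-line hunk above fixes a loop-variable logging slip (HADOOP-10702): the handler iterates over several SPNEGO principals but logged the single "principal" field on every pass, so each keytab login line named the wrong principal. A minimal, self-contained sketch of the corrected pattern, with illustrative names and paths rather than the Hadoop source:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LoginLoggingSketch {
  private static final Logger LOG = LoggerFactory.getLogger(LoginLoggingSketch.class);

  public static void main(String[] args) {
    String keytab = "/etc/security/keytabs/spnego.service.keytab"; // hypothetical path
    String[] spnegoPrincipals = {"HTTP/host1@EXAMPLE.COM", "HTTP/host2@EXAMPLE.COM"};
    for (String spnegoPrincipal : spnegoPrincipals) {
      // The fix fills the second placeholder from the loop variable; before it,
      // an outer "principal" field was logged, so every iteration printed the
      // same name regardless of which login actually ran.
      LOG.info("Login using keytab {}, for principal {}", keytab, spnegoPrincipal);
    }
  }
}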

+ 61 - 19
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -342,24 +342,6 @@ Trunk (Unreleased)
 
     HADOOP-8589. ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
 
-  BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS
-
-    HADOOP-10520. Extended attributes definition and FileSystem APIs for
-    extended attributes. (Yi Liu via wang)
-
-    HADOOP-10546. Javadoc and other small fixes for extended attributes in
-    hadoop-common. (Charles Lamb via wang)
-
-    HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang)
-
-    HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh)
-
-    HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh)
-
-    HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display.(Yi Liu via umamahesh)
-
-    HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh)
-
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -435,6 +417,25 @@ Release 2.5.0 - UNRELEASED
     TCP RST and miss session expiration event due to bug in client connection
     management. (cnauroth)
 
+    HADOOP-10376. Refactor refresh*Protocols into a single generic
+    refreshConfigProtocol. (Chris Li via Arpit Agarwal)
+
+    HADOOP-6350. Documenting Hadoop metrics. (Akira Ajisaka via Arpit Agarwal)
+
+    HADOOP-10691. Improve the readability of 'hadoop fs -help'.
+    (Lei Xu via wang)
+
+    HADOOP-10688. Expose thread-level FileSystem StatisticsData (Sandy Ryza)
+
+    HADOOP-10657. Have RetryInvocationHandler log failover attempt at INFO
+    level. (Ming Ma via jing9)
+
+    HADOOP-10666. Remove Copyright /d/d/d/d Apache Software Foundation from
+    the source files license header. (Henry Saputra via wang)
+
+    HADOOP-10557. FsShell -cp -pa option for preserving extended ACLs.
+    (Akira Ajisaka via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES 
@@ -547,7 +548,48 @@ Release 2.5.0 - UNRELEASED
 
     HADOOP-10664. TestNetUtils.testNormalizeHostName fails. (atm)
 
-Release 2.4.1 - UNRELEASED
+    HADOOP-10656. The password keystore file is not picked by LDAP group mapping
+    (brandonli)
+
+    HADOOP-10622. Shell.runCommand can deadlock (Gera Shegalov via jlowe)
+
+    HADOOP-10686. Writables are not always configured. 
+    (Abraham Elmahrek via kasha)
+
+    HADOOP-10678. SecurityUtil has unnecessary synchronization on collection
+    used for only tests. (Benoy Antony via cnauroth)
+
+    HADOOP-10683. Users authenticated with KERBEROS are recorded as being
+    authenticated with SIMPLE. (Benoy Antony via cnauroth)
+
+    HADOOP-10702. KerberosAuthenticationHandler does not log the principal names
+    correctly. (Benoy Antony via cnauroth)
+
+    HADOOP-10699. Fix build native library on mac osx (Binglin Chang via
+    jlowe)
+
+  BREAKDOWN OF HADOOP-10514 SUBTASKS AND RELATED JIRAS
+
+    HADOOP-10520. Extended attributes definition and FileSystem APIs for
+    extended attributes. (Yi Liu via wang)
+
+    HADOOP-10546. Javadoc and other small fixes for extended attributes in
+    hadoop-common. (Charles Lamb via wang)
+
+    HADOOP-10521. FsShell commands for extended attributes. (Yi Liu via wang)
+
+    HADOOP-10548. Improve FsShell xattr error handling and other fixes. (Charles Lamb via umamahesh)
+
+    HADOOP-10567. Shift XAttr value encoding code out for reuse. (Yi Liu via umamahesh)
+
+    HADOOP-10621. Remove CRLF for xattr value base64 encoding for better display.(Yi Liu via umamahesh)
+
+    HADOOP-10575. Small fixes for XAttrCommands and test. (Yi Liu via umamahesh)
+
+    HADOOP-10561. Copy command with preserve option should handle Xattrs.
+    (Yi Liu via cnauroth)
+
+Release 2.4.1 - 2014-06-23 
 
   INCOMPATIBLE CHANGES
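
The HADOOP-10376 entry above is the core of this merge: the ipc package gains GenericRefreshProtocol, RefreshHandler, RefreshRegistry and RefreshResponse (see those entries in the file list), and DFSAdmin grows a generic -refresh subcommand, so a daemon can expose ad-hoc refresh hooks through one RPC rather than one hand-written protocol per refreshable resource. A hedged sketch of how a handler might plug in; the class names match the files added in this commit, but the exact signatures are assumptions rather than verified source:

import org.apache.hadoop.ipc.RefreshHandler;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;

// Hypothetical handler that re-reads some in-memory state on demand.
public class BlocklistRefresher implements RefreshHandler {
  @Override
  public RefreshResponse handleRefresh(String identifier, String[] args) {
    // Reload whatever state "identifier" names, e.g. re-read a file on disk.
    return new RefreshResponse(0, "Refreshed " + identifier); // assumed constructor
  }

  public static void install() {
    // "myBlocklist" is an illustrative key, not one defined by this commit.
    // The DFSAdmin change here suggests an operator would then trigger it
    // with something like: hdfs dfsadmin -refresh <host:ipc_port> myBlocklist
    RefreshRegistry.defaultRegistry().register("myBlocklist", new BlocklistRefresher());
  }
}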
 

+ 4 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -287,6 +287,10 @@
       <!-- protobuf generated code -->
       <Class name="~org\.apache\.hadoop\.ipc\.proto\.RefreshCallQueueProtocolProtos.*"/>
     </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.proto\.GenericRefreshProtocolProtos.*"/>
+    </Match>
 
     <!--
        Manually checked, misses child thread manually syncing on parent's intrinsic lock.
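
Note on the exclusion above: in a FindBugs filter file, a Class name that starts with ~ is interpreted as a Java regular expression, so this single Match suppresses warnings for every class protoc generates under GenericRefreshProtocolProtos, mirroring the existing entry for RefreshCallQueueProtocolProtos.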

+ 1 - 0
hadoop-common-project/hadoop-common/pom.xml

@@ -318,6 +318,7 @@
                   <include>RefreshAuthorizationPolicyProtocol.proto</include>
                   <include>RefreshUserMappingsProtocol.proto</include>
                   <include>RefreshCallQueueProtocol.proto</include>
+                  <include>GenericRefreshProtocol.proto</include>
                 </includes>
               </source>
               <output>${project.build.directory}/generated-sources/java</output>
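
This include is what wires the new protocol into the build: hadoop-maven-plugins runs protoc over the listed .proto files, so without this line the GenericRefreshProtocolProtos classes referenced by the FindBugs exclusion above would never be generated.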

+ 0 - 2
hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh

@@ -1,5 +1,3 @@
-# Copyright 2011 The Apache Software Foundation
-# 
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information

+ 0 - 2
hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml

@@ -1,8 +1,6 @@
 <?xml version="1.0"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
-
- Copyright 2011 The Apache Software Foundation
  
  Licensed to the Apache Software Foundation (ASF) under one
  or more contributor license agreements.  See the NOTICE file

+ 0 - 2
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties

@@ -1,5 +1,3 @@
-# Copyright 2011 The Apache Software Foundation
-# 
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information

+ 658 - 0
hadoop-common-project/hadoop-common/src/main/docs/releasenotes.html

@@ -1,4 +1,662 @@
 <META http-equiv="Content-Type" content="text/html; charset=UTF-8">
+<title>Hadoop  2.4.1 Release Notes</title>
+<STYLE type="text/css">
+	H1 {font-family: sans-serif}
+	H2 {font-family: sans-serif; margin-left: 7mm}
+	TABLE {margin-left: 7mm}
+</STYLE>
+</head>
+<body>
+<h1>Hadoop  2.4.1 Release Notes</h1>
+These release notes include new developer and user-facing incompatibilities, features, and major improvements. 
+<a name="changes"/>
+<h2>Changes since Hadoop 2.4.0</h2>
+<ul>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2081">YARN-2081</a>.
+     Minor bug reported by Hong Zhiguo and fixed by Hong Zhiguo (applications/distributed-shell)<br>
+     <b>TestDistributedShell fails after YARN-1962</b><br>
+     <blockquote>java.lang.AssertionError: expected:&lt;1&gt; but was:&lt;0&gt;
+        at org.junit.Assert.fail(Assert.java:88)
+        at org.junit.Assert.failNotEquals(Assert.java:743)
+        at org.junit.Assert.assertEquals(Assert.java:118)
+        at org.junit.Assert.assertEquals(Assert.java:555)
+        at org.junit.Assert.assertEquals(Assert.java:542)
+        at org.apache.hadoop.yarn.applications.distributedshell.TestDistributedShell.testDSShell(TestDistributedShell.java:198)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2066">YARN-2066</a>.
+     Minor bug reported by Ted Yu and fixed by Hong Zhiguo <br>
+     <b>Wrong field is referenced in GetApplicationsRequestPBImpl#mergeLocalToBuilder()</b><br>
+     <blockquote>{code}
+    if (this.finish != null) {
+      builder.setFinishBegin(start.getMinimumLong());
+      builder.setFinishEnd(start.getMaximumLong());
+    }
+{code}
+this.finish should be referenced in the if block.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2053">YARN-2053</a>.
+     Major sub-task reported by Sumit Mohanty and fixed by Wangda Tan (resourcemanager)<br>
+     <b>Slider AM fails to restart: NPE in RegisterApplicationMasterResponseProto$Builder.addAllNmTokensFromPreviousAttempts</b><br>
+     <blockquote>Slider AppMaster restart fails with the following:
+{code}
+org.apache.hadoop.yarn.proto.YarnServiceProtos$RegisterApplicationMasterResponseProto$Builder.addAllNmTokensFromPreviousAttempts(YarnServiceProtos.java:2700)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-2016">YARN-2016</a>.
+     Major bug reported by Venkat Ranganathan and fixed by Junping Du (resourcemanager)<br>
+     <b>Yarn getApplicationRequest start time range is not honored</b><br>
+     <blockquote>When we query for the previous applications by creating an instance of GetApplicationsRequest and setting the start time range and application tag, we see that the start range provided is not honored and all applications with the tag are returned
+
+Attaching a reproducer.
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1986">YARN-1986</a>.
+     Critical bug reported by Jon Bringhurst and fixed by Hong Zhiguo <br>
+     <b>In Fifo Scheduler, node heartbeat in between creating app and attempt causes NPE</b><br>
+     <blockquote>After upgrade from 2.2.0 to 2.4.0, NPE on first job start.
+
+-After RM was restarted, the job runs without a problem.-
+
+{noformat}
+19:11:13,441 FATAL ResourceManager:600 - Error in handling event type NODE_UPDATE to the scheduler
+java.lang.NullPointerException
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.assignContainers(FifoScheduler.java:462)
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.nodeUpdate(FifoScheduler.java:714)
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.handle(FifoScheduler.java:743)
+	at org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler.handle(FifoScheduler.java:104)
+	at org.apache.hadoop.yarn.server.resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessor.run(ResourceManager.java:591)
+	at java.lang.Thread.run(Thread.java:744)
+19:11:13,443  INFO ResourceManager:604 - Exiting, bbye..
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1976">YARN-1976</a>.
+     Major bug reported by Yesha Vora and fixed by Junping Du <br>
+     <b>Tracking url missing http protocol for FAILED application</b><br>
+     <blockquote>Run yarn application -list -appStates FAILED,  It does not print http protocol name like FINISHED apps.
+
+{noformat}
+-bash-4.1$ yarn application -list -appStates FINISHED,FAILED,KILLED
+14/04/15 23:55:07 INFO client.RMProxy: Connecting to ResourceManager at host
+Total number of applications (application-types: [] and states: [FINISHED, FAILED, KILLED]):4
+                Application-Id	    Application-Name	    Application-Type	      User	     Queue	             State	       Final-State	       Progress	                       Tracking-URL
+application_1397598467870_0004	           Sleep job	           MAPREDUCE	    hrt_qa	   default	          FINISHED	         SUCCEEDED	           100%	http://host:19888/jobhistory/job/job_1397598467870_0004
+application_1397598467870_0003	           Sleep job	           MAPREDUCE	    hrt_qa	   default	          FINISHED	         SUCCEEDED	           100%	http://host:19888/jobhistory/job/job_1397598467870_0003
+application_1397598467870_0002	           Sleep job	           MAPREDUCE	    hrt_qa	   default	            FAILED	            FAILED	           100%	host:8088/cluster/app/application_1397598467870_0002
+application_1397598467870_0001	          word count	           MAPREDUCE	    hrt_qa	   default	          FINISHED	         SUCCEEDED	           100%	http://host:19888/jobhistory/job/job_1397598467870_0001
+{noformat}
+
+It only prints 'host:8088/cluster/app/application_1397598467870_0002' instead 'http://host:8088/cluster/app/application_1397598467870_0002' </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1975">YARN-1975</a>.
+     Major bug reported by Nathan Roberts and fixed by Mit Desai (resourcemanager)<br>
+     <b>Used resources shows escaped html in CapacityScheduler and FairScheduler page</b><br>
+     <blockquote>Used resources displays as &amp;amp;lt;memory:1111, vCores;&amp;amp;gt; with capacity scheduler
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1962">YARN-1962</a>.
+     Major sub-task reported by Mohammad Kamrul Islam and fixed by Mohammad Kamrul Islam <br>
+     <b>Timeline server is enabled by default</b><br>
+     <blockquote>Since Timeline server is not matured and secured yet, enabling  it by default might create some confusion.
+
+We were playing with 2.4.0 and found a lot of exceptions for distributed shell example related to connection refused error. Btw, we didn't run TS because it is not secured yet.
+
+Although it is possible to explicitly turn it off through yarn-site config. In my opinion,  this extra change for this new service is not worthy at this point,.  
+
+This JIRA is to turn it off by default.
+If there is an agreement, i can put a simple patch about this.
+
+{noformat}
+14/04/17 23:24:33 ERROR impl.TimelineClientImpl: Failed to get the response from the timeline server.
+com.sun.jersey.api.client.ClientHandlerException: java.net.ConnectException: Connection refused
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler.handle(URLConnectionClientHandler.java:149)
+	at com.sun.jersey.api.client.Client.handle(Client.java:648)
+	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
+	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
+	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
+	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingEntities(TimelineClientImpl.java:131)
+	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:104)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.publishApplicationAttemptEvent(ApplicationMaster.java:1072)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.run(ApplicationMaster.java:515)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.main(ApplicationMaster.java:281)
+Caused by: java.net.ConnectException: Connection refused
+	at java.net.PlainSocketImpl.socketConnect(Native Method)
+	at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:339)
+	at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:198)
+	at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:182)
+	at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
+	at java.net.Socket.connect(Socket.java:579)
+	at java.net.Socket.connect(Socket.java:528)
+	at sun.net.NetworkClient.doConnect(NetworkClient.java:180)
+	at sun.net.www.http.HttpClient.openServer(HttpClient.java:432)
+	at sun.net.www.http.HttpClient.openServer(HttpClient.java:527)
+	at sun.net.www.http.HttpClient.&lt;in14/04/17 23:24:33 ERROR impl.TimelineClientImpl: Failed to get the response from the timeline server.
+com.sun.jersey.api.client.ClientHandlerException: java.net.ConnectException: Connection refused
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler.handle(URLConnectionClientHandler.java:149)
+	at com.sun.jersey.api.client.Client.handle(Client.java:648)
+	at com.sun.jersey.api.client.WebResource.handle(WebResource.java:670)
+	at com.sun.jersey.api.client.WebResource.access$200(WebResource.java:74)
+	at com.sun.jersey.api.client.WebResource$Builder.post(WebResource.java:563)
+	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.doPostingEntities(TimelineClientImpl.java:131)
+	at org.apache.hadoop.yarn.client.api.impl.TimelineClientImpl.putEntities(TimelineClientImpl.java:104)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.publishApplicationAttemptEvent(ApplicationMaster.java:1072)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.run(ApplicationMaster.java:515)
+	at org.apache.hadoop.yarn.applications.distributedshell.ApplicationMaster.main(ApplicationMaster.java:281)
+Caused by: java.net.ConnectException: Connection refused
+	at java.net.PlainSocketImpl.socketConnect(Native Method)
+	at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:339)
+	at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:198)
+	at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:182)
+	at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
+	at java.net.Socket.connect(Socket.java:579)
+	at java.net.Socket.connect(Socket.java:528)
+	at sun.net.NetworkClient.doConnect(NetworkClient.java:180)
+	at sun.net.www.http.HttpClient.openServer(HttpClient.java:432)
+	at sun.net.www.http.HttpClient.openServer(HttpClient.java:527)
+	at sun.net.www.http.HttpClient.&lt;init&gt;(HttpClient.java:211)
+	at sun.net.www.http.HttpClient.New(HttpClient.java:308)
+	at sun.net.www.http.HttpClient.New(HttpClient.java:326)
+	at sun.net.www.protocol.http.HttpURLConnection.getNewHttpClient(HttpURLConnection.java:996)
+	at sun.net.www.protocol.http.HttpURLConnection.plainConnect(HttpURLConnection.java:932)
+	at sun.net.www.protocol.http.HttpURLConnection.connect(HttpURLConnection.java:850)
+	at sun.net.www.protocol.http.HttpURLConnection.getOutputStream(HttpURLConnection.java:1091)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler$1$1.getOutputStream(URLConnectionClientHandler.java:225)
+	at com.sun.jersey.api.client.CommittingOutputStream.commitWrite(CommittingOutputStream.java:117)
+	at com.sun.jersey.api.client.CommittingOutputStream.write(CommittingOutputStream.java:89)
+	at org.codehaus.jackson.impl.Utf8Generator._flushBuffer(Utf8Generator.java:1754)
+	at org.codehaus.jackson.impl.Utf8Generator.flush(Utf8Generator.java:1088)
+	at org.codehaus.jackson.map.ObjectMapper.writeValue(ObjectMapper.java:1354)
+	at org.codehaus.jackson.jaxrs.JacksonJsonProvider.writeTo(JacksonJsonProvider.java:527)
+	at com.sun.jersey.api.client.RequestWriter.writeRequestEntity(RequestWriter.java:300)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler._invoke(URLConnectionClientHandler.java:204)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler.handle(URLConnectionClientHandler.java:147)
+	... 9 moreit&gt;(HttpClient.java:211)
+	at sun.net.www.http.HttpClient.New(HttpClient.java:308)
+	at sun.net.www.http.HttpClient.New(HttpClient.java:326)
+	at sun.net.www.protocol.http.HttpURLConnection.getNewHttpClient(HttpURLConnection.java:996)
+	at sun.net.www.protocol.http.HttpURLConnection.plainConnect(HttpURLConnection.java:932)
+	at sun.net.www.protocol.http.HttpURLConnection.connect(HttpURLConnection.java:850)
+	at sun.net.www.protocol.http.HttpURLConnection.getOutputStream(HttpURLConnection.java:1091)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler$1$1.getOutputStream(URLConnectionClientHandler.java:225)
+	at com.sun.jersey.api.client.CommittingOutputStream.commitWrite(CommittingOutputStream.java:117)
+	at com.sun.jersey.api.client.CommittingOutputStream.write(CommittingOutputStream.java:89)
+	at org.codehaus.jackson.impl.Utf8Generator._flushBuffer(Utf8Generator.java:1754)
+	at org.codehaus.jackson.impl.Utf8Generator.flush(Utf8Generator.java:1088)
+	at org.codehaus.jackson.map.ObjectMapper.writeValue(ObjectMapper.java:1354)
+	at org.codehaus.jackson.jaxrs.JacksonJsonProvider.writeTo(JacksonJsonProvider.java:527)
+	at com.sun.jersey.api.client.RequestWriter.writeRequestEntity(RequestWriter.java:300)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler._invoke(URLConnectionClientHandler.java:204)
+	at com.sun.jersey.client.urlconnection.URLConnectionClientHandler.handle(URLConnectionClientHandler.java:147)
+	... 9 more
+
+{noformat}
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1957">YARN-1957</a>.
+     Major sub-task reported by Carlo Curino and fixed by Carlo Curino (resourcemanager)<br>
+     <b>ProportionalCapacitPreemptionPolicy handling of corner cases...</b><br>
+     <blockquote>The current version of ProportionalCapacityPreemptionPolicy should be improved to deal with the following two scenarios:
+1) when rebalancing over-capacity allocations, it potentially preempts without considering the maxCapacity constraints of a queue (i.e., preempting possibly more than strictly necessary)
+2) a zero capacity queue is preempted even if there is no demand (coherent with old use of zero-capacity to disabled queues)
+
+The proposed patch fixes both issues, and introduce few new test cases.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1947">YARN-1947</a>.
+     Major test reported by Jian He and fixed by Jian He <br>
+     <b>TestRMDelegationTokens#testRMDTMasterKeyStateOnRollingMasterKey is failing intermittently</b><br>
+     <blockquote>java.lang.AssertionError: null
+	at org.junit.Assert.fail(Assert.java:92)
+	at org.junit.Assert.assertTrue(Assert.java:43)
+	at org.junit.Assert.assertTrue(Assert.java:54)
+	at org.apache.hadoop.yarn.server.resourcemanager.security.TestRMDelegationTokens.testRMDTMasterKeyStateOnRollingMasterKey(TestRMDelegationTokens.java:117)</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1934">YARN-1934</a>.
+     Blocker bug reported by Rohith and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>Potential NPE in ZKRMStateStore caused by handling Disconnected event from ZK.</b><br>
+     <blockquote>For ZK disconnected event , zkClient is set to null. It is very much prone to throw NPE.
+
+{noformat}
+        case Disconnected:
+          LOG.info("ZKRMStateStore Session disconnected");
+          oldZkClient = zkClient;
+          zkClient = null;
+          break;
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1933">YARN-1933</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>TestAMRestart and TestNodeHealthService failing sometimes on Windows</b><br>
+     <blockquote>TestNodeHealthService failures:
+testNodeHealthScript(org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService)  Time elapsed: 1.405 sec  &lt;&lt;&lt; ERROR!
+java.io.FileNotFoundException: C:\Users\Administrator\Documents\hadoop-common\hadoop-yarn-project\hadoop-yarn\hadoop-yarn-server\hadoop-yarn-server-nodemanager\target\org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService-localDir\failingscript.cmd (The process cannot access the file because it is being used by another process)
+	at java.io.FileOutputStream.open(Native Method)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:221)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:171)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.writeNodeHealthScriptFile(TestNodeHealthService.java:82)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.testNodeHealthScript(TestNodeHealthService.java:154)
+
+testNodeHealthScriptShouldRun(org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService)  Time elapsed: 0 sec  &lt;&lt;&lt; ERROR!
+java.io.FileNotFoundException: C:\Users\Administrator\Documents\hadoop-common\hadoop-yarn-project\hadoop-yarn\hadoop-yarn-server\hadoop-yarn-server-nodemanager\target\org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService-localDir\failingscript.cmd (Access is denied)
+	at java.io.FileOutputStream.open(Native Method)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:221)
+	at java.io.FileOutputStream.&lt;init&gt;(FileOutputStream.java:171)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.writeNodeHealthScriptFile(TestNodeHealthService.java:82)
+	at org.apache.hadoop.yarn.server.nodemanager.TestNodeHealthService.testNodeHealthScriptShouldRun(TestNodeHealthService.java:103)
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1932">YARN-1932</a>.
+     Blocker bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>Javascript injection on the job status page</b><br>
+     <blockquote>Scripts can be injected into the job status page as the diagnostics field is
+not sanitized. Whatever string you set there will show up to the jobs page as it is ... ie. if you put any script commands, they will be executed in the browser of the user who is opening the page.
+
+We need escaping the diagnostic string in order to not run the scripts.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1931">YARN-1931</a>.
+     Blocker bug reported by Thomas Graves and fixed by Sandy Ryza (applications)<br>
+     <b>Private API change in YARN-1824 in 2.4 broke compatibility with previous releases</b><br>
+     <blockquote>YARN-1824 broke compatibility with previous 2.x releases by changes the API's in org.apache.hadoop.yarn.util.Apps.{setEnvFromInputString,addToEnvironment}  The old api should be added back in.
+
+This affects any ApplicationMasters who were using this api.  It also breaks previously built MapReduce libraries from working with the new Yarn release as MR uses this api. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1929">YARN-1929</a>.
+     Blocker bug reported by Rohith and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>DeadLock in RM when automatic failover is enabled.</b><br>
+     <blockquote>Dead lock detected  in RM when automatic failover is enabled.
+
+
+{noformat}
+Found one Java-level deadlock:
+=============================
+"Thread-2":
+  waiting to lock monitor 0x00007fb514303cf0 (object 0x00000000ef153fd0, a org.apache.hadoop.ha.ActiveStandbyElector),
+  which is held by "main-EventThread"
+"main-EventThread":
+  waiting to lock monitor 0x00007fb514750a48 (object 0x00000000ef154020, a org.apache.hadoop.yarn.server.resourcemanager.EmbeddedElectorService),
+  which is held by "Thread-2"
+{noformat}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1928">YARN-1928</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>TestAMRMRPCNodeUpdates fails ocassionally</b><br>
+     <blockquote>{code}
+junit.framework.AssertionFailedError: expected:&lt;0&gt; but was:&lt;4&gt;
+	at junit.framework.Assert.fail(Assert.java:50)
+	at junit.framework.Assert.failNotEquals(Assert.java:287)
+	at junit.framework.Assert.assertEquals(Assert.java:67)
+	at junit.framework.Assert.assertEquals(Assert.java:199)
+	at junit.framework.Assert.assertEquals(Assert.java:205)
+	at org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.TestAMRMRPCNodeUpdates.testAMRMUnusableNodes(TestAMRMRPCNodeUpdates.java:136)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1926">YARN-1926</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>DistributedShell unit tests fail on Windows</b><br>
+     <blockquote>Couple of unit tests for the DistributedShell fail on Windows - specifically testDSShellWithShellScript and testDSRestartWithPreviousRunningContainers </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1924">YARN-1924</a>.
+     Critical bug reported by Arpit Gupta and fixed by Jian He <br>
+     <b>STATE_STORE_OP_FAILED happens when ZKRMStateStore tries to update app(attempt) before storing it</b><br>
+     <blockquote>Noticed on a HA cluster Both RM shut down with this error. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1920">YARN-1920</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>TestFileSystemApplicationHistoryStore.testMissingApplicationAttemptHistoryData fails in windows</b><br>
+     <blockquote>Though this was only failing in Windows, after debugging, I realized that the test fails because we are leaking a file-handle in the history service.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1914">YARN-1914</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>Test TestFSDownload.testDownloadPublicWithStatCache fails on Windows</b><br>
+     <blockquote>The TestFSDownload.testDownloadPublicWithStatCache test in hadoop-yarn-common consistently fails on Windows environments.
+
+The root cause is that the test checks for execute permission for all users on every ancestor of the target directory. In windows, by default, group "Everyone" has no permissions on any directory in the install drive. It's unreasonable to expect this test to pass and we should skip it on Windows.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1910">YARN-1910</a>.
+     Major bug reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>TestAMRMTokens fails on windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1908">YARN-1908</a>.
+     Major bug reported by Tassapol Athiapinya and fixed by Vinod Kumar Vavilapalli (applications/distributed-shell)<br>
+     <b>Distributed shell with custom script has permission error.</b><br>
+     <blockquote>Create test1.sh having "pwd".
+
+Run this command as user1:
+hadoop jar /usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell.jar -jar /usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell.jar -shell_script test1.sh
+
+NM is run by yarn user. An exception is thrown because yarn user has no permissions on custom script in hdfs path. The custom script is created with distributed shell app.
+{code}
+Caused by: org.apache.hadoop.ipc.RemoteException(org.apache.hadoop.security.AccessControlException): Permission denied: user=yarn, access=WRITE, inode="/user/user1/DistributedShell/70":user1:user1:drwxr-xr-x
+	at org.apache.hadoop.hdfs.server.namenode.FSPermissionChecker.checkFsPermission(FSPermissionChecker.java:265)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1907">YARN-1907</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestRMApplicationHistoryWriter#testRMWritingMassiveHistory runs slow and intermittently fails</b><br>
+     <blockquote>The test has 10000 containers that it tries to cleanup.
+The cleanup has a timeout of 20000ms in which the test sometimes cannot do the cleanup completely and gives out an Assertion Failure.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1905">YARN-1905</a>.
+     Trivial test reported by Chris Nauroth and fixed by Chris Nauroth (nodemanager)<br>
+     <b>TestProcfsBasedProcessTree must only run on Linux.</b><br>
+     <blockquote>The tests in {{TestProcfsBasedProcessTree}} only make sense on Linux, where the process tree calculations are based on reading the /proc file system.  Right now, not all of the individual tests are skipped when the OS is not Linux.  This patch will make it consistent.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1903">YARN-1903</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>Killing Container on NEW and LOCALIZING will result in exitCode and diagnostics not set</b><br>
+     <blockquote>The container status after stopping container is not expected.
+{code}
+java.lang.AssertionError: 4: 
+	at org.junit.Assert.fail(Assert.java:93)
+	at org.junit.Assert.assertTrue(Assert.java:43)
+	at org.apache.hadoop.yarn.client.api.impl.TestNMClient.testGetContainerStatus(TestNMClient.java:382)
+	at org.apache.hadoop.yarn.client.api.impl.TestNMClient.testContainerManagement(TestNMClient.java:346)
+	at org.apache.hadoop.yarn.client.api.impl.TestNMClient.testNMClient(TestNMClient.java:226)
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1898">YARN-1898</a>.
+     Major sub-task reported by Yesha Vora and fixed by Xuan Gong (resourcemanager)<br>
+     <b>Standby RM's conf, stacks, logLevel, metrics, jmx and logs links are redirecting to Active RM</b><br>
+     <blockquote>Standby RM links /conf, /stacks, /logLevel, /metrics, /jmx is redirected to Active RM.
+
+It should not be redirected to Active RM</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1892">YARN-1892</a>.
+     Minor improvement reported by Siddharth Seth and fixed by Jian He (scheduler)<br>
+     <b>Excessive logging in RM</b><br>
+     <blockquote>Mostly in the CS I believe
+
+{code}
+ INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt: Application application_1395435468498_0011 reserved container container_1395435468498_0011_01_000213 on node host:  #containers=5 available=4096 used=20960, currently has 1 at priority 4; currentReservation 4096
+{code}
+
+{code}
+INFO org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue: hive2 usedResources: &lt;memory:20480, vCores:5&gt; clusterResources: &lt;memory:81920, vCores:16&gt; currentCapacity 0.25 required &lt;memory:4096, vCores:1&gt; potentialNewCapacity: 0.255 (  max-capacity: 0.25)
+{code}
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1883">YARN-1883</a>.
+     Major bug reported by Mit Desai and fixed by Mit Desai <br>
+     <b>TestRMAdminService fails due to inconsistent entries in UserGroups</b><br>
+     <blockquote>testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider fails with the following error:
+{noformat}
+java.lang.AssertionError: null
+	at org.junit.Assert.fail(Assert.java:92)
+	at org.junit.Assert.assertTrue(Assert.java:43)
+	at org.junit.Assert.assertTrue(Assert.java:54)
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMAdminService.testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider(TestRMAdminService.java:421)
+	at org.apache.hadoop.yarn.server.resourcemanager.TestRMAdminService.testOrder(TestRMAdminService.java:104)
+{noformat}
+
+Line Numbers will be inconsistent as I was testing to run it in a particular order. But the Line on which the failure occurs is
+{code}
+Assert.assertTrue(groupBefore.contains("test_group_A")
+        &amp;&amp; groupBefore.contains("test_group_B")
+        &amp;&amp; groupBefore.contains("test_group_C") &amp;&amp; groupBefore.size() == 3);
+{code}
+
+testRMInitialsWithFileSystemBasedConfigurationProvider() and
+testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider()
+calls the function {{MockUnixGroupsMapping.updateGroups();}} which changes the list of userGroups.
+
+testRefreshUserToGroupsMappingsWithFileSystemBasedConfigurationProvider() tries to verify the groups before changing it and fails if testRMInitialsWithFileSystemBasedConfigurationProvider() already ran and made the changes.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1861">YARN-1861</a>.
+     Blocker sub-task reported by Arpit Gupta and fixed by Karthik Kambatla (resourcemanager)<br>
+     <b>Both RM stuck in standby mode when automatic failover is enabled</b><br>
+     <blockquote>In our HA tests we noticed that the tests got stuck because both RM's got into standby state and no one became active.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1837">YARN-1837</a>.
+     Major bug reported by Tsuyoshi OZAWA and fixed by Hong Zhiguo <br>
+     <b>TestMoveApplication.testMoveRejectedByScheduler randomly fails</b><br>
+     <blockquote>TestMoveApplication#testMoveRejectedByScheduler fails because of NullPointerException. It looks caused by unhandled exception handling at server-side.</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1750">YARN-1750</a>.
+     Major test reported by Ming Ma and fixed by Wangda Tan (nodemanager)<br>
+     <b>TestNodeStatusUpdater#testNMRegistration is incorrect in test case</b><br>
+     <blockquote>This test case passes. However, the test output log has
+
+java.lang.AssertionError: Number of applications should only be one! expected:&lt;1&gt; but was:&lt;2&gt;
+        at org.junit.Assert.fail(Assert.java:93)
+        at org.junit.Assert.failNotEquals(Assert.java:647)
+        at org.junit.Assert.assertEquals(Assert.java:128)
+        at org.junit.Assert.assertEquals(Assert.java:472)
+        at org.apache.hadoop.yarn.server.nodemanager.TestNodeStatusUpdater$MyResourceTracker.nodeHeartbeat(TestNodeStatusUpdater.java:267)
+        at org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl$1.run(NodeStatusUpdaterImpl.java:469)
+        at java.lang.Thread.run(Thread.java:695)
+
+TestNodeStatusUpdater.java has invalid asserts.
+
+      } else if (heartBeatID == 3) {
+        // Checks on the RM end
+        Assert.assertEquals("Number of applications should only be one!", 1,
+            appToContainers.size());
+        Assert.assertEquals("Number of container for the app should be two!",
+            2, appToContainers.get(appId2).size());
+
+
+We should fix the assert and add more check to the test.
+
+
+</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1701">YARN-1701</a>.
+     Major sub-task reported by Gera Shegalov and fixed by Tsuyoshi OZAWA <br>
+     <b>Improve default paths of timeline store and generic history store</b><br>
+     <blockquote>When I enable AHS via yarn.ahs.enabled, the app history is still not visible in AHS webUI. This is due to NullApplicationHistoryStore as yarn.resourcemanager.history-writer.class. It would be good to have just one key to enable basic functionality.
+
+yarn.ahs.fs-history-store.uri uses {code}${hadoop.log.dir}{code}, which is local file system location. However, FileSystemApplicationHistoryStore uses DFS by default.  </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1696">YARN-1696</a>.
+     Blocker sub-task reported by Karthik Kambatla and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>Document RM HA</b><br>
+     <blockquote>Add documentation for RM HA. Marking this a blocker for 2.4 as this is required to call RM HA Stable and ready for public consumption. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1281">YARN-1281</a>.
+     Major test reported by Karthik Kambatla and fixed by Tsuyoshi OZAWA (resourcemanager)<br>
+     <b>TestZKRMStateStoreZKClientConnections fails intermittently</b><br>
+     <blockquote>The test fails intermittently - haven't been able to reproduce the failure deterministically. </blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/YARN-1201">YARN-1201</a>.
+     Minor bug reported by Nemon Lou and fixed by Wangda Tan (resourcemanager)<br>
+     <b>TestAMAuthorization fails with local hostname cannot be resolved</b><br>
+     <blockquote>When hostname is 158-1-131-10, TestAMAuthorization fails.
+{code}
+Running org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization
+Tests run: 4, Failures: 0, Errors: 2, Skipped: 0, Time elapsed: 14.034 sec &lt;&lt;&lt; FAILURE! - in org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization
+testUnauthorizedAccess[0](org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization)  Time elapsed: 3.952 sec  &lt;&lt;&lt; ERROR!
+java.lang.NullPointerException: null
+        at org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.testUnauthorizedAccess(TestAMAuthorization.java:284)
+
+testUnauthorizedAccess[1](org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization)  Time elapsed: 3.116 sec  &lt;&lt;&lt; ERROR!
+java.lang.NullPointerException: null
+        at org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.testUnauthorizedAccess(TestAMAuthorization.java:284)
+
+
+Results :
+
+Tests in error:
+  TestAMAuthorization.testUnauthorizedAccess:284 NullPointer
+  TestAMAuthorization.testUnauthorizedAccess:284 NullPointer
+
+Tests run: 4, Failures: 0, Errors: 2, Skipped: 0
+
+{code}</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5843">MAPREDUCE-5843</a>.
+     Major test reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>TestMRKeyValueTextInputFormat failing on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5841">MAPREDUCE-5841</a>.
+     Major bug reported by Sangjin Lee and fixed by Sangjin Lee (mrv2)<br>
+     <b>uber job doesn't terminate on getting mapred job kill</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5835">MAPREDUCE-5835</a>.
+     Critical bug reported by Ming Ma and fixed by Ming Ma <br>
+     <b>Killing Task might cause the job to go to ERROR state</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5833">MAPREDUCE-5833</a>.
+     Major test reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>TestRMContainerAllocator fails ocassionally</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5832">MAPREDUCE-5832</a>.
+     Major bug reported by Jian He and fixed by Vinod Kumar Vavilapalli <br>
+     <b>Few tests in TestJobClient fail on Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5828">MAPREDUCE-5828</a>.
+     Major bug reported by Vinod Kumar Vavilapalli and fixed by Vinod Kumar Vavilapalli <br>
+     <b>TestMapReduceJobControl fails on JDK 7 + Windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5827">MAPREDUCE-5827</a>.
+     Major bug reported by Zhijie Shen and fixed by Zhijie Shen <br>
+     <b>TestSpeculativeExecutionWithMRApp fails</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5826">MAPREDUCE-5826</a>.
+     Major bug reported by Varun Vasudev and fixed by Varun Vasudev <br>
+     <b>TestHistoryServerFileSystemStateStoreService.testTokenStore fails in windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5824">MAPREDUCE-5824</a>.
+     Major bug reported by Xuan Gong and fixed by Xuan Gong <br>
+     <b>TestPipesNonJavaInputFormat.testFormat fails in windows</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5821">MAPREDUCE-5821</a>.
+     Major bug reported by Todd Lipcon and fixed by Todd Lipcon (performance , task)<br>
+     <b>IFile merge allocates new byte array for every value</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5818">MAPREDUCE-5818</a>.
+     Major bug reported by Jian He and fixed by Jian He <br>
+     <b>hsadmin cmd is missing in mapred.cmd</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5815">MAPREDUCE-5815</a>.
+     Blocker bug reported by Gera Shegalov and fixed by Akira AJISAKA (client , mrv2)<br>
+     <b>Fix NPE in TestMRAppMaster</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-5714">MAPREDUCE-5714</a>.
+     Major bug reported by Jinghui Wang and fixed by Jinghui Wang (test)<br>
+     <b>TestMRAppComponentDependencies causes surefire to exit without saying proper goodbye</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/MAPREDUCE-3191">MAPREDUCE-3191</a>.
+     Trivial bug reported by Todd Lipcon and fixed by Chen He <br>
+     <b>docs for map output compression incorrectly reference SequenceFile</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6411">HDFS-6411</a>.
+     Major bug reported by Zhongyi Xie and fixed by Brandon Li (nfs)<br>
+     <b>nfs-hdfs-gateway mount raises I/O error and hangs when a unauthorized user attempts to access it</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6402">HDFS-6402</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>Suppress findbugs warning for failure to override equals and hashCode in FsAclPermission.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6397">HDFS-6397</a>.
+     Critical bug reported by Mohammad Kamrul Islam and fixed by Mohammad Kamrul Islam <br>
+     <b>NN shows inconsistent value in deadnode count </b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6362">HDFS-6362</a>.
+     Blocker bug reported by Arpit Agarwal and fixed by Arpit Agarwal (namenode)<br>
+     <b>InvalidateBlocks is inconsistent in usage of DatanodeUuid and StorageID</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6361">HDFS-6361</a>.
+     Major bug reported by Yongjun Zhang and fixed by Yongjun Zhang (nfs)<br>
+     <b>TestIdUserGroup.testUserUpdateSetting failed due to out of range nfsnobody Id</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6340">HDFS-6340</a>.
+     Blocker bug reported by Rahul Singhal and fixed by Rahul Singhal (datanode)<br>
+     <b>DN can't finalize upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6329">HDFS-6329</a>.
+     Blocker bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>WebHdfs does not work if HA is enabled on NN but logical URI is not configured.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6326">HDFS-6326</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Chris Nauroth (webhdfs)<br>
+     <b>WebHdfs ACL compatibility is broken</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6325">HDFS-6325</a>.
+     Major bug reported by Konstantin Shvachko and fixed by Keith Pak (namenode)<br>
+     <b>Append should fail if the last block has insufficient number of replicas</b><br>
+     <blockquote>I have committed the fix to the trunk, branch-2, and branch-2.4 respectively. Thanks Keith!</blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6313">HDFS-6313</a>.
+     Blocker bug reported by Daryn Sharp and fixed by Kihwal Lee (webhdfs)<br>
+     <b>WebHdfs may use the wrong NN when configured for multiple HA NNs</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6245">HDFS-6245</a>.
+     Major bug reported by Arpit Gupta and fixed by Arpit Agarwal <br>
+     <b>datanode fails to start with a bad disk even when failed volumes is set</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6236">HDFS-6236</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>ImageServlet should use Time#monotonicNow to measure latency.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6235">HDFS-6235</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode , test)<br>
+     <b>TestFileJournalManager can fail on Windows due to file locking if tests run out of order.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6234">HDFS-6234</a>.
+     Trivial bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode , test)<br>
+     <b>TestDatanodeConfig#testMemlockLimit fails on Windows due to invalid file path.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6232">HDFS-6232</a>.
+     Major bug reported by Stephen Chu and fixed by Akira AJISAKA (tools)<br>
+     <b>OfflineEditsViewer throws an NPE on edits containing ACL modifications</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6231">HDFS-6231</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (hdfs-client)<br>
+     <b>DFSClient hangs infinitely if using hedged reads and all eligible datanodes die.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6229">HDFS-6229</a>.
+     Major bug reported by Jing Zhao and fixed by Jing Zhao (ha)<br>
+     <b>Race condition in failover can cause RetryCache fail to work</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6215">HDFS-6215</a>.
+     Minor bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Wrong error message for upgrade</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6209">HDFS-6209</a>.
+     Minor bug reported by Arpit Agarwal and fixed by Arpit Agarwal (test)<br>
+     <b>Fix flaky test TestValidateConfigurationSettings.testThatDifferentRPCandHttpPortsAreOK</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6208">HDFS-6208</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode)<br>
+     <b>DataNode caching can leak file descriptors.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6206">HDFS-6206</a>.
+     Major bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze <br>
+     <b>DFSUtil.substituteForWildcardAddress may throw NPE</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6204">HDFS-6204</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (test)<br>
+     <b>TestRBWBlockInvalidation may fail</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6198">HDFS-6198</a>.
+     Major bug reported by Chris Nauroth and fixed by Chris Nauroth (datanode)<br>
+     <b>DataNode rolling upgrade does not correctly identify current block pool directory and replace with trash on Windows.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6197">HDFS-6197</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (namenode)<br>
+     <b>Rolling upgrade rollback on Windows can fail attempting to rename edit log segment files to a destination that already exists.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-6189">HDFS-6189</a>.
+     Major test reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>Multiple HDFS tests fail on Windows attempting to use a test root path containing a colon.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-4052">HDFS-4052</a>.
+     Minor improvement reported by Jing Zhao and fixed by Jing Zhao <br>
+     <b>BlockManager#invalidateWork should print logs outside the lock</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HDFS-2882">HDFS-2882</a>.
+     Major bug reported by Todd Lipcon and fixed by Vinayakumar B (datanode)<br>
+     <b>DN continues to start up, even if block pool fails to initialize</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10612">HADOOP-10612</a>.
+     Major bug reported by Brandon Li and fixed by Brandon Li (nfs)<br>
+     <b>NFS failed to refresh the user group id mapping table</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10562">HADOOP-10562</a>.
+     Critical bug reported by Suresh Srinivas and fixed by Suresh Srinivas <br>
+     <b>Namenode exits on exception without printing stack trace in AbstractDelegationTokenSecretManager</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10527">HADOOP-10527</a>.
+     Major bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>Fix incorrect return code and allow more retries on EINTR</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10522">HADOOP-10522</a>.
+     Critical bug reported by Kihwal Lee and fixed by Kihwal Lee <br>
+     <b>JniBasedUnixGroupMapping mishandles errors</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10490">HADOOP-10490</a>.
+     Minor bug reported by Chris Nauroth and fixed by Chris Nauroth (test)<br>
+     <b>TestMapFile and TestBloomMapFile leak file descriptors.</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10473">HADOOP-10473</a>.
+     Minor bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (test)<br>
+     <b>TestCallQueueManager is still flaky</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10466">HADOOP-10466</a>.
+     Minor improvement reported by Nicolas Liochon and fixed by Nicolas Liochon (security)<br>
+     <b>Lower the log level in UserGroupInformation</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10456">HADOOP-10456</a>.
+     Major bug reported by Nishkam Ravi and fixed by Nishkam Ravi (conf)<br>
+     <b>Bug in Configuration.java exposed by Spark (ConcurrentModificationException)</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-10455">HADOOP-10455</a>.
+     Major bug reported by Tsz Wo Nicholas Sze and fixed by Tsz Wo Nicholas Sze (ipc)<br>
+     <b>When there is an exception, ipc.Server should first check whether it is a terse exception</b><br>
+     <blockquote></blockquote></li>
+<li> <a href="https://issues.apache.org/jira/browse/HADOOP-8826">HADOOP-8826</a>.
+     Minor bug reported by Robert Joseph Evans and fixed by Mit Desai <br>
+     <b>Docs still refer to 0.20.205 as stable line</b><br>
+     <blockquote></blockquote></li>
+</ul>
+</body></html>
+<META http-equiv="Content-Type" content="text/html; charset=UTF-8">
 <title>Hadoop  2.4.0 Release Notes</title>
 <STYLE type="text/css">
 	H1 {font-family: sans-serif}

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -142,6 +142,9 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String
   HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_CALLQUEUE =
       "security.refresh.callqueue.protocol.acl";
+  public static final String
+  HADOOP_SECURITY_SERVICE_AUTHORIZATION_GENERIC_REFRESH =
+      "security.refresh.generic.protocol.acl";
   public static final String 
   SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
   public static final String 
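
The new HADOOP_SECURITY_SERVICE_AUTHORIZATION_GENERIC_REFRESH key names the service-level authorization ACL for the GenericRefreshProtocol introduced later in this change. A minimal sketch of how such a key is looked up; the "*" default shown here is an assumption mirroring Hadoop's usual open-by-default service ACLs:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class GenericRefreshAclCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Read the ACL that hadoop-policy.xml would define for the generic
        // refresh protocol; "*" (allow everyone) is an assumed default.
        String acl = conf.get(
            CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_GENERIC_REFRESH,
            "*");
        System.out.println("security.refresh.generic.protocol.acl = " + acl);
      }
    }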

+ 27 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -2804,7 +2804,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * be perceived as atomic with respect to other threads, which is all we
      * need.
      */
-    private static class StatisticsData {
+    public static class StatisticsData {
       volatile long bytesRead;
       volatile long bytesWritten;
       volatile int readOps;
@@ -2849,6 +2849,26 @@ public abstract class FileSystem extends Configured implements Closeable {
             + readOps + " read ops, " + largeReadOps + " large read ops, "
             + writeOps + " write ops";
       }
+      
+      public long getBytesRead() {
+        return bytesRead;
+      }
+      
+      public long getBytesWritten() {
+        return bytesWritten;
+      }
+      
+      public int getReadOps() {
+        return readOps;
+      }
+      
+      public int getLargeReadOps() {
+        return largeReadOps;
+      }
+      
+      public int getWriteOps() {
+        return writeOps;
+      }
     }
 
     private interface StatisticsAggregator<T> {
@@ -2907,7 +2927,7 @@ public abstract class FileSystem extends Configured implements Closeable {
     /**
      * Get or create the thread-local data associated with the current thread.
      */
-    private StatisticsData getThreadData() {
+    public StatisticsData getThreadStatistics() {
       StatisticsData data = threadData.get();
       if (data == null) {
         data = new StatisticsData(
@@ -2928,7 +2948,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param newBytes the additional bytes read
      */
     public void incrementBytesRead(long newBytes) {
-      getThreadData().bytesRead += newBytes;
+      getThreadStatistics().bytesRead += newBytes;
     }
     
     /**
@@ -2936,7 +2956,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param newBytes the additional bytes written
      */
     public void incrementBytesWritten(long newBytes) {
-      getThreadData().bytesWritten += newBytes;
+      getThreadStatistics().bytesWritten += newBytes;
     }
     
     /**
@@ -2944,7 +2964,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param count number of read operations
      */
     public void incrementReadOps(int count) {
-      getThreadData().readOps += count;
+      getThreadStatistics().readOps += count;
     }
 
     /**
@@ -2952,7 +2972,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param count number of large read operations
      */
     public void incrementLargeReadOps(int count) {
-      getThreadData().largeReadOps += count;
+      getThreadStatistics().largeReadOps += count;
     }
 
     /**
@@ -2960,7 +2980,7 @@ public abstract class FileSystem extends Configured implements Closeable {
      * @param count number of write operations
      */
     public void incrementWriteOps(int count) {
-      getThreadData().writeOps += count;
+      getThreadStatistics().writeOps += count;
     }
 
     /**
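
Making StatisticsData and getThreadStatistics() public exposes per-thread I/O counters alongside the existing aggregate ones. A minimal sketch, assuming any readable local file (the /etc/hosts path is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ThreadStatsDemo {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        FSDataInputStream in = fs.open(new Path("/etc/hosts"));
        in.read();   // generate some per-thread read statistics
        in.close();
        for (FileSystem.Statistics stats : FileSystem.getAllStatistics()) {
          FileSystem.Statistics.StatisticsData data = stats.getThreadStatistics();
          System.out.println(stats.getScheme() + ": " + data.getBytesRead()
              + " bytes read, " + data.getReadOps() + " read ops in this thread");
        }
      }
    }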

+ 46 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java

@@ -23,6 +23,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.LinkedList;
 
+import org.apache.commons.lang.WordUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,6 +32,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -40,6 +42,8 @@ public class FsShell extends Configured implements Tool {
   
   static final Log LOG = LogFactory.getLog(FsShell.class);
 
+  private static final int MAX_LINE_WIDTH = 80;
+
   private FileSystem fs;
   private Trash trash;
   protected CommandFactory commandFactory;
@@ -117,7 +121,7 @@ public class FsShell extends Configured implements Tool {
     public static final String NAME = "usage";
     public static final String USAGE = "[cmd ...]";
     public static final String DESCRIPTION =
-      "Displays the usage for given command or all commands if none\n" +
+      "Displays the usage for given command or all commands if none " +
       "is specified.";
     
     @Override
@@ -137,7 +141,7 @@ public class FsShell extends Configured implements Tool {
     public static final String NAME = "help";
     public static final String USAGE = "[cmd ...]";
     public static final String DESCRIPTION =
-      "Displays help for given command or all commands if none\n" +
+      "Displays help for given command or all commands if none " +
       "is specified.";
     
     @Override
@@ -197,7 +201,7 @@ public class FsShell extends Configured implements Tool {
       for (String name : commandFactory.getNames()) {
         Command instance = commandFactory.getInstance(name);
         if (!instance.isDeprecated()) {
-          System.out.println("\t[" + instance.getUsage() + "]");
+          out.println("\t[" + instance.getUsage() + "]");
           instances.add(instance);
         }
       }
@@ -217,20 +221,48 @@ public class FsShell extends Configured implements Tool {
     out.println(usagePrefix + " " + instance.getUsage());
   }
 
-  // TODO: will eventually auto-wrap the text, but this matches the expected
-  // output for the hdfs tests...
   private void printInstanceHelp(PrintStream out, Command instance) {
-    boolean firstLine = true;
+    out.println(instance.getUsage() + " :");
+    TableListing listing = null;
+    final String prefix = "  ";
     for (String line : instance.getDescription().split("\n")) {
-      String prefix;
-      if (firstLine) {
-        prefix = instance.getUsage() + ":\t";
-        firstLine = false;
-      } else {
-        prefix = "\t\t";
+      if (line.matches("^[ \t]*[-<].*$")) {
+        String[] segments = line.split(":");
+        if (segments.length == 2) {
+          if (listing == null) {
+            listing = createOptionTableListing();
+          }
+          listing.addRow(segments[0].trim(), segments[1].trim());
+          continue;
+        }
+      }
+
+      // Normal literal description.
+      if (listing != null) {
+        for (String listingLine : listing.toString().split("\n")) {
+          out.println(prefix + listingLine);
+        }
+        listing = null;
+      }
+
+      for (String descLine : WordUtils.wrap(
+          line, MAX_LINE_WIDTH, "\n", true).split("\n")) {
+        out.println(prefix + descLine);
+      }
+    }
+
+    if (listing != null) {
+      for (String listingLine : listing.toString().split("\n")) {
+        out.println(prefix + listingLine);
       }
-      System.out.println(prefix + line);
-    }    
+    }
+  }
+
+  // Creates a two-row table, the first row is for the command line option,
+  // the second row is for the option description.
+  private TableListing createOptionTableListing() {
+    return new TableListing.Builder().addField("").addField("", true)
+        .wrapWidth(MAX_LINE_WIDTH).build();
   }
 
   /**
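
The reworked help printer wraps free-form description text at MAX_LINE_WIDTH columns and routes lines shaped like option entries ("-R: ...", "<path>: ...") into a TableListing. A minimal sketch of the wrapping call in isolation, using the same commons-lang API:

    import org.apache.commons.lang.WordUtils;

    public class WrapDemo {
      public static void main(String[] args) {
        String line = "Displays the usage for given command or all commands "
            + "if none is specified.";
        // Wrap at 80 columns, break with "\n", and split words longer than the width.
        for (String out : WordUtils.wrap(line, 80, "\n", true).split("\n")) {
          System.out.println("  " + out);
        }
      }
    }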

+ 24 - 24
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShellPermissions.java

@@ -63,18 +63,18 @@ public class FsShellPermissions extends FsCommand {
     public static final String NAME = "chmod";
     public static final String USAGE = "[-R] <MODE[,MODE]... | OCTALMODE> PATH...";
     public static final String DESCRIPTION =
-      "Changes permissions of a file.\n" +
-      "\tThis works similar to shell's chmod with a few exceptions.\n\n" +
-      "-R\tmodifies the files recursively. This is the only option\n" +
-      "\tcurrently supported.\n\n" +
-      "MODE\tMode is same as mode used for chmod shell command.\n" +
-      "\tOnly letters recognized are 'rwxXt'. E.g. +t,a+r,g-w,+rwx,o=r\n\n" +
-      "OCTALMODE Mode specifed in 3 or 4 digits. If 4 digits, the first may\n" +
-      "be 1 or 0 to turn the sticky bit on or off, respectively.  Unlike " +
-      "shell command, it is not possible to specify only part of the mode\n" +
-      "\tE.g. 754 is same as u=rwx,g=rx,o=r\n\n" +
-      "\tIf none of 'augo' is specified, 'a' is assumed and unlike\n" +
-      "\tshell command, no umask is applied.";
+      "Changes permissions of a file. " +
+      "This works similar to the shell's chmod command with a few exceptions.\n" +
+      "-R: modifies the files recursively. This is the only option" +
+      " currently supported.\n" +
+      "<MODE>: Mode is the same as mode used for the shell's command. " +
+      "The only letters recognized are 'rwxXt', e.g. +t,a+r,g-w,+rwx,o=r.\n" +
+      "<OCTALMODE>: Mode specifed in 3 or 4 digits. If 4 digits, the first " +
+      "may be 1 or 0 to turn the sticky bit on or off, respectively.  Unlike " +
+      "the shell command, it is not possible to specify only part of the " +
+      "mode, e.g. 754 is same as u=rwx,g=rx,o=r.\n\n" +
+      "If none of 'augo' is specified, 'a' is assumed and unlike the " +
+      "shell command, no umask is applied.";
 
     protected ChmodParser pp;
 
@@ -121,18 +121,18 @@ public class FsShellPermissions extends FsCommand {
     public static final String NAME = "chown";
     public static final String USAGE = "[-R] [OWNER][:[GROUP]] PATH...";
     public static final String DESCRIPTION =
-      "Changes owner and group of a file.\n" +
-      "\tThis is similar to shell's chown with a few exceptions.\n\n" +
-      "\t-R\tmodifies the files recursively. This is the only option\n" +
-      "\tcurrently supported.\n\n" +
-      "\tIf only owner or group is specified then only owner or\n" +
-      "\tgroup is modified.\n\n" +
-      "\tThe owner and group names may only consist of digits, alphabet,\n"+
-      "\tand any of " + allowedChars + ". The names are case sensitive.\n\n" +
-      "\tWARNING: Avoid using '.' to separate user name and group though\n" +
-      "\tLinux allows it. If user names have dots in them and you are\n" +
-      "\tusing local file system, you might see surprising results since\n" +
-      "\tshell command 'chown' is used for local files.";
+      "Changes owner and group of a file. " +
+      "This is similar to the shell's chown command with a few exceptions.\n" +
+      "-R: modifies the files recursively. This is the only option " +
+      "currently supported.\n\n" +
+      "If only the owner or group is specified, then only the owner or " +
+      "group is modified. " +
+      "The owner and group names may only consist of digits, alphabet, "+
+      "and any of " + allowedChars + ". The names are case sensitive.\n\n" +
+      "WARNING: Avoid using '.' to separate user name and group though " +
+      "Linux allows it. If user names have dots in them and you are " +
+      "using local file system, you might see surprising results since " +
+      "the shell command 'chown' is used for local files.";
 
     ///allows only "allowedChars" above in names for owner and group
     static private final Pattern chownPattern = Pattern.compile(

+ 14 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclEntry.java

@@ -298,4 +298,18 @@ public class AclEntry {
     AclEntry aclEntry = builder.build();
     return aclEntry;
   }
+
+  /**
+   * Convert a List of AclEntries into a string - the reverse of parseAclSpec.
+   * @param aclSpec List of AclEntries to convert
+   * @return String representation of aclSpec
+   */
+  public static String aclSpecToString(List<AclEntry> aclSpec) {
+    StringBuilder buf = new StringBuilder();
+    for (AclEntry e : aclSpec) {
+      if (buf.length() > 0) buf.append(',');
+      buf.append(e.toString());
+    }
+    return buf.toString();  // no trailing ',' and safe for an empty list
+  }
 }

+ 10 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/AclCommands.java

@@ -59,8 +59,8 @@ class AclCommands extends FsCommand {
     public static String DESCRIPTION = "Displays the Access Control Lists"
         + " (ACLs) of files and directories. If a directory has a default ACL,"
         + " then getfacl also displays the default ACL.\n"
-        + "-R: List the ACLs of all files and directories recursively.\n"
-        + "<path>: File or directory to list.\n";
+        + "  -R: List the ACLs of all files and directories recursively.\n"
+        + "  <path>: File or directory to list.\n";
 
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
@@ -153,19 +153,19 @@ class AclCommands extends FsCommand {
     public static String DESCRIPTION = "Sets Access Control Lists (ACLs)"
         + " of files and directories.\n" 
         + "Options:\n"
-        + "-b :Remove all but the base ACL entries. The entries for user,"
+        + "  -b :Remove all but the base ACL entries. The entries for user,"
         + " group and others are retained for compatibility with permission "
         + "bits.\n" 
-        + "-k :Remove the default ACL.\n"
-        + "-R :Apply operations to all files and directories recursively.\n"
-        + "-m :Modify ACL. New entries are added to the ACL, and existing"
+        + "  -k :Remove the default ACL.\n"
+        + "  -R :Apply operations to all files and directories recursively.\n"
+        + "  -m :Modify ACL. New entries are added to the ACL, and existing"
         + " entries are retained.\n"
-        + "-x :Remove specified ACL entries. Other ACL entries are retained.\n"
-        + "--set :Fully replace the ACL, discarding all existing entries."
+        + "  -x :Remove specified ACL entries. Other ACL entries are retained.\n"
+        + "  --set :Fully replace the ACL, discarding all existing entries."
         + " The <acl_spec> must include entries for user, group, and others"
         + " for compatibility with permission bits.\n"
-        + "<acl_spec>: Comma separated list of ACL entries.\n"
-        + "<path>: File or directory to modify.\n";
+        + "  <acl_spec>: Comma separated list of ACL entries.\n"
+        + "  <path>: File or directory to modify.\n";
 
     CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "b", "k", "R",
         "m", "x", "-set");

+ 83 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CommandWithDestination.java

@@ -22,7 +22,13 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.EnumSet;
+import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.NoSuchElementException;
 
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -34,6 +40,9 @@ import org.apache.hadoop.fs.PathIsDirectoryException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.PathNotFoundException;
 import org.apache.hadoop.fs.PathOperationException;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 
 /**
@@ -45,7 +54,6 @@ import org.apache.hadoop.io.IOUtils;
 abstract class CommandWithDestination extends FsCommand {  
   protected PathData dst;
   private boolean overwrite = false;
-  private boolean preserve = false;
   private boolean verifyChecksum = true;
   private boolean writeChecksum = true;
   
@@ -74,7 +82,54 @@ abstract class CommandWithDestination extends FsCommand {
    * implementation allows.
    */
   protected void setPreserve(boolean preserve) {
-    this.preserve = preserve;
+    if (preserve) {
+      preserve(FileAttribute.TIMESTAMPS);
+      preserve(FileAttribute.OWNERSHIP);
+      preserve(FileAttribute.PERMISSION);
+    } else {
+      preserveStatus.clear();
+    }
+  }
+  
+  protected static enum FileAttribute {
+    TIMESTAMPS, OWNERSHIP, PERMISSION, ACL, XATTR;
+
+    public static FileAttribute getAttribute(char symbol) {
+      for (FileAttribute attribute : values()) {
+        if (attribute.name().charAt(0) == Character.toUpperCase(symbol)) {
+          return attribute;
+        }
+      }
+      throw new NoSuchElementException("No attribute for " + symbol);
+    }
+  }
+  
+  private EnumSet<FileAttribute> preserveStatus = 
+      EnumSet.noneOf(FileAttribute.class);
+  
+  /**
+   * Checks if the input attribute should be preserved or not
+   *
+   * @param attribute - Attribute to check
+   * @return boolean true if attribute should be preserved, false otherwise
+   */
+  private boolean shouldPreserve(FileAttribute attribute) {
+    return preserveStatus.contains(attribute);
+  }
+  
+  /**
+   * Add file attributes that need to be preserved. This method may be
+   * called multiple times to add attributes.
+   *
+   * @param fileAttribute - Attribute to add, one at a time
+   */
+  protected void preserve(FileAttribute fileAttribute) {
+    // EnumSet.add is already a no-op for duplicates, so add unconditionally.
+    preserveStatus.add(fileAttribute);
   }
 
   /**
@@ -243,19 +298,44 @@ abstract class CommandWithDestination extends FsCommand {
     try {
       in = src.fs.open(src.path);
       copyStreamToTarget(in, target);
-      if(preserve) {
+      if (shouldPreserve(FileAttribute.TIMESTAMPS)) {
         target.fs.setTimes(
           target.path,
           src.stat.getModificationTime(),
           src.stat.getAccessTime());
+      }
+      if (shouldPreserve(FileAttribute.OWNERSHIP)) {
         target.fs.setOwner(
           target.path,
           src.stat.getOwner(),
           src.stat.getGroup());
+      }
+      if (shouldPreserve(FileAttribute.PERMISSION) ||
+          shouldPreserve(FileAttribute.ACL)) {
         target.fs.setPermission(
           target.path,
           src.stat.getPermission());
       }
+      if (shouldPreserve(FileAttribute.ACL)) {
+        FsPermission perm = src.stat.getPermission();
+        if (perm.getAclBit()) {
+          List<AclEntry> srcEntries =
+              src.fs.getAclStatus(src.path).getEntries();
+          List<AclEntry> srcFullEntries =
+              AclUtil.getAclFromPermAndEntries(perm, srcEntries);
+          target.fs.setAcl(target.path, srcFullEntries);
+        }
+      }
+      if (shouldPreserve(FileAttribute.XATTR)) {
+        Map<String, byte[]> srcXAttrs = src.fs.getXAttrs(src.path);
+        if (srcXAttrs != null) {
+          Iterator<Entry<String, byte[]>> iter = srcXAttrs.entrySet().iterator();
+          while (iter.hasNext()) {
+            Entry<String, byte[]> entry = iter.next();
+            target.fs.setXAttr(target.path, entry.getKey(), entry.getValue());
+          }
+        }
+      }
     } finally {
       IOUtils.closeStream(in);
     }
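
preserveStatus is an EnumSet, and EnumSet.add() already ignores duplicates, which is why preserve() above can add unconditionally. A standalone illustration; the Attr enum is a stand-in, not the patch's FileAttribute:

    import java.util.EnumSet;

    public class EnumSetDemo {
      enum Attr { TIMESTAMPS, OWNERSHIP, PERMISSION }

      public static void main(String[] args) {
        EnumSet<Attr> preserved = EnumSet.noneOf(Attr.class);
        preserved.add(Attr.TIMESTAMPS);
        preserved.add(Attr.TIMESTAMPS);  // duplicate add is a no-op
        System.out.println(preserved);   // [TIMESTAMPS], still one element
      }
    }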

+ 50 - 24
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 
@@ -54,10 +55,10 @@ class CopyCommands {
     public static final String NAME = "getmerge";    
     public static final String USAGE = "[-nl] <src> <localdst>";
     public static final String DESCRIPTION =
-      "Get all the files in the directories that\n" +
-      "match the source file pattern and merge and sort them to only\n" +
+      "Get all the files in the directories that " +
+      "match the source file pattern and merge and sort them to only " +
       "one file on local fs. <src> is kept.\n" +
-      "  -nl   Add a newline character at the end of each file.";
+      "-nl: Add a newline character at the end of each file.";
 
     protected PathData dst = null;
     protected String delimiter = null;
@@ -132,24 +133,49 @@ class CopyCommands {
 
   static class Cp extends CommandWithDestination {
     public static final String NAME = "cp";
-    public static final String USAGE = "[-f] [-p] <src> ... <dst>";
+    public static final String USAGE = "[-f] [-p | -p[topax]] <src> ... <dst>";
     public static final String DESCRIPTION =
-      "Copy files that match the file pattern <src> to a\n" +
-      "destination.  When copying multiple files, the destination\n" +
-      "must be a directory. Passing -p preserves access and\n" +
-      "modification times, ownership and the mode. Passing -f\n" +
-      "overwrites the destination if it already exists.\n";
-    
+      "Copy files that match the file pattern <src> to a " +
+      "destination.  When copying multiple files, the destination " +
+      "must be a directory. Passing -p preserves status " +
+      "[topax] (timestamps, ownership, permission, ACLs, XAttr). " +
+      "If -p is specified with no <arg>, then preserves " +
+      "timestamps, ownership, permission. If -pa is specified, " +
+      "then preserves permission also because ACL is a super-set of " +
+      "permission. Passing -f overwrites the destination if it " +
+      "already exists.\n";
+
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
-      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "f", "p");
+      popPreserveOption(args);
+      CommandFormat cf = new CommandFormat(2, Integer.MAX_VALUE, "f");
       cf.parse(args);
       setOverwrite(cf.getOpt("f"));
-      setPreserve(cf.getOpt("p"));
       // should have a -r option
       setRecursive(true);
       getRemoteDestination(args);
     }
+    
+    private void popPreserveOption(List<String> args) {
+      for (Iterator<String> iter = args.iterator(); iter.hasNext(); ) {
+        String cur = iter.next();
+        if (cur.equals("--")) {
+          // stop parsing arguments when you see --
+          break;
+        } else if (cur.startsWith("-p")) {
+          iter.remove();
+          if (cur.length() == 2) {
+            setPreserve(true);
+          } else {
+            String attributes = cur.substring(2);
+            for (int index = 0; index < attributes.length(); index++) {
+              preserve(FileAttribute.getAttribute(attributes.charAt(index)));
+            }
+          }
+          return;
+        }
+      }
+    }
   }
   
   /** 
@@ -160,10 +186,10 @@ class CopyCommands {
     public static final String USAGE =
       "[-p] [-ignoreCrc] [-crc] <src> ... <localdst>";
     public static final String DESCRIPTION =
-      "Copy files that match the file pattern <src>\n" +
-      "to the local name.  <src> is kept.  When copying multiple,\n" +
-      "files, the destination must be a directory. Passing\n" +
-      "-p preserves access and modification times,\n" +
+      "Copy files that match the file pattern <src> " +
+      "to the local name.  <src> is kept.  When copying multiple " +
+      "files, the destination must be a directory. Passing " +
+      "-p preserves access and modification times, " +
       "ownership and the mode.\n";
 
     @Override
@@ -187,11 +213,11 @@ class CopyCommands {
     public static final String NAME = "put";
     public static final String USAGE = "[-f] [-p] <localsrc> ... <dst>";
     public static final String DESCRIPTION =
-      "Copy files from the local file system\n" +
-      "into fs. Copying fails if the file already\n" +
-      "exists, unless the -f flag is given. Passing\n" +
-      "-p preserves access and modification times,\n" +
-      "ownership and the mode. Passing -f overwrites\n" +
+      "Copy files from the local file system " +
+      "into fs. Copying fails if the file already " +
+      "exists, unless the -f flag is given. Passing " +
+      "-p preserves access and modification times, " +
+      "ownership and the mode. Passing -f overwrites " +
       "the destination if it already exists.\n";
 
     @Override
@@ -254,9 +280,9 @@ class CopyCommands {
     public static final String NAME = "appendToFile";
     public static final String USAGE = "<localsrc> ... <dst>";
     public static final String DESCRIPTION =
-        "Appends the contents of all the given local files to the\n" +
-            "given dst file. The dst file will be created if it does\n" +
-            "not exist. If <localSrc> is -, then the input is read\n" +
+        "Appends the contents of all the given local files to the " +
+            "given dst file. The dst file will be created if it does " +
+            "not exist. If <localSrc> is -, then the input is read " +
             "from stdin.";
 
     private static final int DEFAULT_IO_LENGTH = 1024 * 1024;
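
popPreserveOption accepts both the bare -p and the -p[topax] form, mapping each attribute letter through FileAttribute.getAttribute. Illustrative invocations (paths are hypothetical):

    hadoop fs -cp -p /src/file /dst/file        # preserve timestamps, ownership, permission
    hadoop fs -cp -ptopax /src/file /dst/file   # additionally preserve ACLs and XAttrs
    hadoop fs -cp -pa /src/file /dst/file       # ACLs, which also preserves permission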

+ 6 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Delete.java

@@ -51,13 +51,13 @@ class Delete {
     public static final String NAME = "rm";
     public static final String USAGE = "[-f] [-r|-R] [-skipTrash] <src> ...";
     public static final String DESCRIPTION =
-      "Delete all files that match the specified file pattern.\n" +
+      "Delete all files that match the specified file pattern. " +
       "Equivalent to the Unix command \"rm <src>\"\n" +
-      "-skipTrash option bypasses trash, if enabled, and immediately\n" +
+      "-skipTrash: option bypasses trash, if enabled, and immediately " +
       "deletes <src>\n" +
-      "  -f     If the file does not exist, do not display a diagnostic\n" +
-      "         message or modify the exit status to reflect an error.\n" +
-      "  -[rR]  Recursively deletes directories";
+      "-f: If the file does not exist, do not display a diagnostic " +
+      "message or modify the exit status to reflect an error.\n" +
+      "-[rR]:  Recursively deletes directories";
 
     private boolean skipTrash = false;
     private boolean deleteDirs = false;
@@ -147,7 +147,7 @@ class Delete {
     public static final String USAGE =
       "[--ignore-fail-on-non-empty] <dir> ...";
     public static final String DESCRIPTION =
-      "Removes the directory entry specified by each directory argument,\n" +
+      "Removes the directory entry specified by each directory argument, " +
       "provided it is empty.\n"; 
     
     private boolean ignoreNonEmpty = false;

+ 6 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java

@@ -75,7 +75,7 @@ class Display extends FsCommand {
     public static final String NAME = "cat";
     public static final String USAGE = "[-ignoreCrc] <src> ...";
     public static final String DESCRIPTION =
-      "Fetch all files that match the file pattern <src> \n" +
+      "Fetch all files that match the file pattern <src> " +
       "and display their content on stdout.\n";
 
     private boolean verifyChecksum = true;
@@ -170,11 +170,11 @@ class Display extends FsCommand {
     public static final String NAME = "checksum";
     public static final String USAGE = "<src> ...";
     public static final String DESCRIPTION =
-      "Dump checksum information for files that match the file\n" +
-      "pattern <src> to stdout. Note that this requires a round-trip\n" +
-      "to a datanode storing each block of the file, and thus is not\n" +
-      "efficient to run on a large number of files. The checksum of a\n" +
-      "file depends on its content, block size and the checksum\n" +
+      "Dump checksum information for files that match the file " +
+      "pattern <src> to stdout. Note that this requires a round-trip " +
+      "to a datanode storing each block of the file, and thus is not " +
+      "efficient to run on a large number of files. The checksum of a " +
+      "file depends on its content, block size and the checksum " +
       "algorithm and parameters used for creating the file.";
 
     @Override

+ 12 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/FsUsage.java

@@ -57,12 +57,12 @@ class FsUsage extends FsCommand {
     public static final String NAME = "df";
     public static final String USAGE = "[-h] [<path> ...]";
     public static final String DESCRIPTION =
-      "Shows the capacity, free and used space of the filesystem.\n"+
-      "If the filesystem has multiple partitions, and no path to a\n" +
-      "particular partition is specified, then the status of the root\n" +
+      "Shows the capacity, free and used space of the filesystem. "+
+      "If the filesystem has multiple partitions, and no path to a " +
+      "particular partition is specified, then the status of the root " +
       "partitions will be shown.\n" +
-      "  -h   Formats the sizes of files in a human-readable fashion\n" +
-      "       rather than a number of bytes.\n\n";
+      "-h: Formats the sizes of files in a human-readable fashion " +
+      "rather than a number of bytes.";
     
     @Override
     protected void processOptions(LinkedList<String> args)
@@ -108,14 +108,14 @@ class FsUsage extends FsCommand {
     public static final String NAME = "du";
     public static final String USAGE = "[-s] [-h] <path> ...";
     public static final String DESCRIPTION =
-    "Show the amount of space, in bytes, used by the files that\n" +
+    "Show the amount of space, in bytes, used by the files that " +
     "match the specified file pattern. The following flags are optional:\n" +
-    "  -s   Rather than showing the size of each individual file that\n" +
-    "       matches the pattern, shows the total (summary) size.\n" +
-    "  -h   Formats the sizes of files in a human-readable fashion\n" +
-    "       rather than a number of bytes.\n\n" +
-    "Note that, even without the -s option, this only shows size summaries\n" +
-    "one level deep into a directory.\n" +
+    "-s: Rather than showing the size of each individual file that" +
+    " matches the pattern, shows the total (summary) size.\n" +
+    "-h: Formats the sizes of files in a human-readable fashion" +
+    " rather than a number of bytes.\n\n" +
+    "Note that, even without the -s option, this only shows size summaries " +
+    "one level deep into a directory.\n\n" +
     "The output is in the form \n" + 
     "\tsize\tname(full path)\n"; 
 

+ 10 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Ls.java

@@ -49,16 +49,16 @@ class Ls extends FsCommand {
   public static final String NAME = "ls";
   public static final String USAGE = "[-d] [-h] [-R] [<path> ...]";
   public static final String DESCRIPTION =
-		    "List the contents that match the specified file pattern. If\n" + 
-		    "path is not specified, the contents of /user/<currentUser>\n" +
-		    "will be listed. Directory entries are of the form \n" +
-		    "\tpermissions - userid groupid size_of_directory(in bytes) modification_date(yyyy-MM-dd HH:mm) directoryName \n" +
-		    "and file entries are of the form \n" + 
-		    "\tpermissions number_of_replicas userid groupid size_of_file(in bytes) modification_date(yyyy-MM-dd HH:mm) fileName \n" +
-		    "  -d  Directories are listed as plain files.\n" +
-		    "  -h  Formats the sizes of files in a human-readable fashion\n" +
-		    "      rather than a number of bytes.\n" +
-		    "  -R  Recursively list the contents of directories.";
+		    "List the contents that match the specified file pattern. If " +
+		    "path is not specified, the contents of /user/<currentUser> " +
+		    "will be listed. Directory entries are of the form:\n" +
+		    "\tpermissions - userId groupId sizeOfDirectory(in bytes) modificationDate(yyyy-MM-dd HH:mm) directoryName\n\n" +
+		    "and file entries are of the form:\n" +
+		    "\tpermissions numberOfReplicas userId groupId sizeOfFile(in bytes) modificationDate(yyyy-MM-dd HH:mm) fileName\n" +
+		    "-d:  Directories are listed as plain files.\n" +
+		    "-h:  Formats the sizes of files in a human-readable fashion " +
+		    "rather than a number of bytes.\n" +
+		    "-R:  Recursively list the contents of directories.";
 		  
   
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Mkdir.java

@@ -44,7 +44,7 @@ class Mkdir extends FsCommand {
   public static final String USAGE = "[-p] <path> ...";
   public static final String DESCRIPTION =
     "Create a directory in specified location.\n" +
-    "  -p  Do not fail if the directory already exists";
+    "-p: Do not fail if the directory already exists";
 
   private boolean createParents;
   

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/MoveCommands.java

@@ -45,7 +45,7 @@ class MoveCommands {
     public static final String NAME = "moveFromLocal";
     public static final String USAGE = "<localsrc> ... <dst>";
     public static final String DESCRIPTION = 
-      "Same as -put, except that the source is\n" +
+      "Same as -put, except that the source is " +
       "deleted after it's copied.";
 
     @Override
@@ -87,8 +87,8 @@ class MoveCommands {
     public static final String NAME = "mv";
     public static final String USAGE = "<src> ... <dst>";
     public static final String DESCRIPTION = 
-      "Move files that match the specified file pattern <src>\n" +
-      "to a destination <dst>.  When moving multiple files, the\n" +
+      "Move files that match the specified file pattern <src> " +
+      "to a destination <dst>.  When moving multiple files, the " +
       "destination must be a directory.";
 
     @Override

+ 4 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SetReplication.java

@@ -41,12 +41,12 @@ class SetReplication extends FsCommand {
   public static final String NAME = "setrep";
   public static final String USAGE = "[-R] [-w] <rep> <path> ...";
   public static final String DESCRIPTION =
-    "Set the replication level of a file. If <path> is a directory\n" +
-    "then the command recursively changes the replication factor of\n" +
+    "Set the replication level of a file. If <path> is a directory " +
+    "then the command recursively changes the replication factor of " +
     "all files under the directory tree rooted at <path>.\n" +
-    "The -w flag requests that the command wait for the replication\n" +
+    "-w: It requests that the command waits for the replication " +
     "to complete. This can potentially take a very long time.\n" +
-    "The -R flag is accepted for backwards compatibility. It has no effect.";
+    "-R: It is accepted for backwards compatibility. It has no effect.";
   
   protected short newRep = 0;
   protected List<PathData> waitList = new LinkedList<PathData>();

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Stat.java

@@ -51,8 +51,8 @@ class Stat extends FsCommand {
   public static final String NAME = "stat";
   public static final String USAGE = "[format] <path> ...";
   public static final String DESCRIPTION =
-    "Print statistics about the file/directory at <path>\n" +
-    "in the specified format. Format accepts filesize in blocks (%b), group name of owner(%g),\n" +
+    "Print statistics about the file/directory at <path> " +
+    "in the specified format. Format accepts filesize in blocks (%b), group name of owner(%g), " +
     "filename (%n), block size (%o), replication (%r), user name of owner(%u), modification date (%y, %Y)\n";
 
   protected static final SimpleDateFormat timeFmt;

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Tail.java

@@ -43,7 +43,7 @@ class Tail extends FsCommand {
   public static final String USAGE = "[-f] <file>";
   public static final String DESCRIPTION =
     "Show the last 1KB of the file.\n" +
-    "\t\tThe -f option shows appended data as the file grows.\n";
+    "-f: Shows appended data as the file grows.\n";
 
   private long startingOffset = -1024;
   private boolean follow = false;

+ 1 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Test.java

@@ -43,8 +43,7 @@ class Test extends FsCommand {
     "  -e  return 0 if <path> exists.\n" +
     "  -f  return 0 if <path> is a file.\n" +
     "  -s  return 0 if file <path> is greater than zero bytes in size.\n" +
-    "  -z  return 0 if file <path> is zero bytes in size.\n" +
-    "else, return 1.";
+    "  -z  return 0 if file <path> is zero bytes in size, else return 1.";
 
   private char flag;
   

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Touchz.java

@@ -47,8 +47,8 @@ class Touch extends FsCommand {
     public static final String NAME = "touchz";
     public static final String USAGE = "<path> ...";
     public static final String DESCRIPTION =
-      "Creates a file of zero length\n" +
-      "at <path> with current time as the timestamp of that <path>.\n" +
+      "Creates a file of zero length " +
+      "at <path> with current time as the timestamp of that <path>. " +
       "An error is returned if the file exists with non-zero length\n";
 
     @Override

+ 9 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java

@@ -59,10 +59,10 @@ class XAttrCommands extends FsCommand {
       "-R: Recursively list the attributes for all files and directories.\n" +
       "-n name: Dump the named extended attribute value.\n" +
       "-d: Dump all extended attribute values associated with pathname.\n" +
-      "-e <encoding>: Encode values after retrieving them.\n" +
-      "Valid encodings are \"text\", \"hex\", and \"base64\".\n" +
-      "Values encoded as text strings are enclosed in double quotes (\"),\n" +
-      " and values encoded as hexadecimal and base64 are prefixed with\n" +
+      "-e <encoding>: Encode values after retrieving them." +
+      "Valid encodings are \"text\", \"hex\", and \"base64\". " +
+      "Values encoded as text strings are enclosed in double quotes (\")," +
+      " and values encoded as hexadecimal and base64 are prefixed with " +
       "0x and 0s, respectively.\n" +
       "<path>: The file or directory.\n";
     private final static Function<String, XAttrCodec> enValueOfFunc =
@@ -137,11 +137,11 @@ class XAttrCommands extends FsCommand {
     public static final String DESCRIPTION =
       "Sets an extended attribute name and value for a file or directory.\n" +
       "-n name: The extended attribute name.\n" +
-      "-v value: The extended attribute value. There are three different\n" +
-      "encoding methods for the value. If the argument is enclosed in double\n" +
-      "quotes, then the value is the string inside the quotes. If the\n" +
-      "argument is prefixed with 0x or 0X, then it is taken as a hexadecimal\n" +
-      "number. If the argument begins with 0s or 0S, then it is taken as a\n" +
+      "-v value: The extended attribute value. There are three different " +
+      "encoding methods for the value. If the argument is enclosed in double " +
+      "quotes, then the value is the string inside the quotes. If the " +
+      "argument is prefixed with 0x or 0X, then it is taken as a hexadecimal " +
+      "number. If the argument begins with 0s or 0S, then it is taken as a " +
       "base64 encoding.\n" +
       "-x name: Remove the extended attribute.\n" +
       "<path>: The file or directory.\n";

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.ha;
 
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Map;
@@ -68,7 +69,7 @@ public abstract class HAAdmin extends Configured implements Tool {
   protected final static Map<String, UsageInfo> USAGE =
     ImmutableMap.<String, UsageInfo>builder()
     .put("-transitionToActive",
-        new UsageInfo(" <serviceId> [--"+FORCEACTIVE+"]", "Transitions the service into Active state"))
+        new UsageInfo("<serviceId> [--"+FORCEACTIVE+"]", "Transitions the service into Active state"))
     .put("-transitionToStandby",
         new UsageInfo("<serviceId>", "Transitions the service into Standby state"))
     .put("-failover",
@@ -104,7 +105,8 @@ public abstract class HAAdmin extends Configured implements Tool {
   protected abstract HAServiceTarget resolveTarget(String string);
   
   protected Collection<String> getTargetIds(String targetNodeToActivate) {
-    return Arrays.asList(new String[]{targetNodeToActivate});
+    return new ArrayList<String>(
+        Arrays.asList(new String[]{targetNodeToActivate}));
   }
 
   protected String getUsageString() {
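
The defensive ArrayList copy matters because Arrays.asList returns a fixed-size view; a caller that tries to grow or shrink the returned target-id list would hit UnsupportedOperationException. A small JDK-only demonstration:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    public class FixedSizeListDemo {
      public static void main(String[] args) {
        List<String> fixed = Arrays.asList("nn1");
        try {
          fixed.add("nn2");                      // fixed-size view: this throws
        } catch (UnsupportedOperationException e) {
          System.out.println("Arrays.asList is not growable");
        }
        List<String> mutable = new ArrayList<String>(Arrays.asList("nn1"));
        mutable.add("nn2");                      // the defensive copy accepts it
        System.out.println(mutable);             // [nn1, nn2]
      }
    }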

+ 7 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java

@@ -256,7 +256,7 @@ public class MapFile {
       } else {
         keyClass= 
           (Class<? extends WritableComparable>) keyClassOption.getValue();
-        this.comparator = WritableComparator.get(keyClass);
+        this.comparator = WritableComparator.get(keyClass, conf);
       }
       this.lastKey = comparator.newKey();
       FileSystem fs = dirName.getFileSystem(conf);
@@ -428,12 +428,13 @@ public class MapFile {
       this.data = createDataFileReader(dataFile, conf, options);
       this.firstPosition = data.getPosition();
 
-      if (comparator == null)
-        this.comparator = 
-          WritableComparator.get(data.getKeyClass().
-                                   asSubclass(WritableComparable.class));
-      else
+      if (comparator == null) {
+        Class<? extends WritableComparable> cls;
+        cls = data.getKeyClass().asSubclass(WritableComparable.class);
+        this.comparator = WritableComparator.get(cls, conf);
+      } else {
         this.comparator = comparator;
+      }
 
       // open the index
       SequenceFile.Reader.Option[] indexOptions =

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SequenceFile.java

@@ -2676,7 +2676,7 @@ public class SequenceFile {
     /** Sort and merge files containing the named classes. */
     public Sorter(FileSystem fs, Class<? extends WritableComparable> keyClass,
                   Class valClass, Configuration conf)  {
-      this(fs, WritableComparator.get(keyClass), keyClass, valClass, conf);
+      this(fs, WritableComparator.get(keyClass, conf), keyClass, valClass, conf);
     }
 
     /** Sort and merge using an arbitrary {@link RawComparator}. */

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SetFile.java

@@ -52,7 +52,7 @@ public class SetFile extends MapFile {
                   Class<? extends WritableComparable> keyClass,
                   SequenceFile.CompressionType compress)
       throws IOException {
-      this(conf, fs, dirName, WritableComparator.get(keyClass), compress);
+      this(conf, fs, dirName, WritableComparator.get(keyClass, conf), compress);
     }
 
     /** Create a set naming the element comparator and compression type. */

+ 34 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java

@@ -24,6 +24,8 @@ import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 
 /** A Comparator for {@link WritableComparable}s.
@@ -37,13 +39,21 @@ import org.apache.hadoop.util.ReflectionUtils;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
-public class WritableComparator implements RawComparator {
+public class WritableComparator implements RawComparator, Configurable {
 
   private static final ConcurrentHashMap<Class, WritableComparator> comparators 
           = new ConcurrentHashMap<Class, WritableComparator>(); // registry
 
-  /** Get a comparator for a {@link WritableComparable} implementation. */
+  private Configuration conf;
+
+  /** For backwards compatibility. **/
   public static WritableComparator get(Class<? extends WritableComparable> c) {
+    return get(c, null);
+  }
+
+  /** Get a comparator for a {@link WritableComparable} implementation. */
+  public static WritableComparator get(
+      Class<? extends WritableComparable> c, Configuration conf) {
     WritableComparator comparator = comparators.get(c);
     if (comparator == null) {
       // force the static initializers to run
@@ -52,12 +62,24 @@ public class WritableComparator implements RawComparator {
       comparator = comparators.get(c);
       // if not, use the generic one
       if (comparator == null) {
-        comparator = new WritableComparator(c, true);
+        comparator = new WritableComparator(c, conf, true);
       }
     }
+    // Newly passed Configuration objects should be used.
+    ReflectionUtils.setConf(comparator, conf);
     return comparator;
   }
 
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
   /**
    * Force initialization of the static members.
    * As of Java 5, referencing a class doesn't force it to initialize. Since
@@ -91,12 +113,19 @@ public class WritableComparator implements RawComparator {
 
   /** Construct for a {@link WritableComparable} implementation. */
   protected WritableComparator(Class<? extends WritableComparable> keyClass) {
-    this(keyClass, false);
+    this(keyClass, null, false);
   }
 
   protected WritableComparator(Class<? extends WritableComparable> keyClass,
       boolean createInstances) {
+    this(keyClass, null, createInstances);
+  }
+
+  protected WritableComparator(Class<? extends WritableComparable> keyClass,
+                               Configuration conf,
+                               boolean createInstances) {
     this.keyClass = keyClass;
+    this.conf = (conf != null) ? conf : new Configuration();
     if (createInstances) {
       key1 = newKey();
       key2 = newKey();
@@ -112,7 +141,7 @@ public class WritableComparator implements RawComparator {
 
   /** Construct a new {@link WritableComparable} instance. */
   public WritableComparable newKey() {
-    return ReflectionUtils.newInstance(keyClass, null);
+    return ReflectionUtils.newInstance(keyClass, conf);
   }
 
   /** Optimization hook.  Override this to make SequenceFile.Sorter's scream.
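
A minimal usage sketch of the new Configuration-aware lookup (Text is just a stand-in key class; the example class name is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.WritableComparator;

    public class ComparatorLookupExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // The comparator is resolved from the static registry; the passed
        // Configuration is injected via ReflectionUtils.setConf, so that
        // newKey() can build configured key instances.
        WritableComparator cmp = WritableComparator.get(Text.class, conf);
        System.out.println(cmp.compare(new Text("a"), new Text("b")) < 0); // true
      }
    }

The zero-argument overload get(c) remains for callers that have no Configuration; it delegates with a null conf, and the generic comparator constructor then falls back to a fresh new Configuration().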

+ 1 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java

@@ -136,9 +136,7 @@ public class RetryInvocationHandler<T> implements RpcInvocationHandler {
               msg += " after " + invocationFailoverCount + " fail over attempts"; 
             }
             msg += ". Trying to fail over " + formatSleepMessage(action.delayMillis);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug(msg, e);
-            }
+            LOG.info(msg, e);
           } else {
             if(LOG.isDebugEnabled()) {
               LOG.debug("Exception while invoking " + method.getName()

+ 49 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/GenericRefreshProtocol.java

@@ -0,0 +1,49 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.io.retry.Idempotent;
+import org.apache.hadoop.security.KerberosInfo;
+
+/**
+ * Protocol which is used to refresh arbitrary things at runtime.
+ */
+@KerberosInfo(
+    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface GenericRefreshProtocol {
+  /**
+   * Version 1: Initial version.
+   */
+  public static final long versionID = 1L;
+
+  /**
+   * Refresh the resource based on the identity passed in.
+   * @throws IOException
+   */
+  @Idempotent
+  Collection<RefreshResponse> refresh(String identifier, String[] args)
+      throws IOException;
+}

+ 35 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshHandler.java

@@ -0,0 +1,35 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Used to register custom methods to refresh at runtime.
+ */
+@InterfaceStability.Unstable
+public interface RefreshHandler {
+  /**
+   * Implement this method to accept refresh requests from the administrator.
+   * @param identifier is the identifier you registered earlier
+   * @param args contains a list of string args from the administrator
+   * @throws Exception as a shorthand for a RefreshResponse(-1, message)
+   * @return a RefreshResponse
+   */
+  RefreshResponse handleRefresh(String identifier, String[] args);
+}
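
A minimal handler sketch following this contract (the class name and refresh behavior are illustrative only):

    import java.util.Arrays;
    import org.apache.hadoop.ipc.RefreshHandler;
    import org.apache.hadoop.ipc.RefreshResponse;

    public class EchoRefreshHandler implements RefreshHandler {
      @Override
      public RefreshResponse handleRefresh(String identifier, String[] args) {
        // Reload whatever resource 'identifier' names; this sketch just
        // echoes the arguments back to the administrator.
        if (args == null || args.length == 0) {
          return RefreshResponse.successResponse();
        }
        return new RefreshResponse(0, "refreshed with: " + Arrays.toString(args));
      }
    }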

+ 134 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshRegistry.java

@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.util.ArrayList;
+import java.util.Collection;
+
+import com.google.common.base.Joiner;
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.Multimap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Used to register custom methods to refresh at runtime.
+ * Each identifier maps to one or more RefreshHandlers.
+ */
+@InterfaceStability.Unstable
+public class RefreshRegistry {
+  public static final Log LOG = LogFactory.getLog(RefreshRegistry.class);
+
+  // Used to hold singleton instance
+  private static class RegistryHolder {
+    @SuppressWarnings("All")
+    public static RefreshRegistry registry = new RefreshRegistry();
+  }
+
+  // Singleton access
+  public static RefreshRegistry defaultRegistry() {
+    return RegistryHolder.registry;
+  }
+
+  private final Multimap<String, RefreshHandler> handlerTable;
+
+  public RefreshRegistry() {
+    handlerTable = HashMultimap.create();
+  }
+
+  /**
+   * Registers an object as a handler for a given identity.
+   * Note: registration prevents the handler from being GC'd; the object
+   * should unregister itself when done.
+   * @param identifier a unique identifier for this resource,
+   *                   such as org.apache.hadoop.blacklist
+   * @param handler the object to register
+   */
+  public synchronized void register(String identifier, RefreshHandler handler) {
+    if (identifier == null) {
+      throw new NullPointerException("Identifier cannot be null");
+    }
+    handlerTable.put(identifier, handler);
+  }
+
+  /**
+   * Remove a registered handler for a given identity.
+   * @param identifier the resource to unregister
+   * @return true if the handler was removed
+   */
+  public synchronized boolean unregister(String identifier, RefreshHandler handler) {
+    return handlerTable.remove(identifier, handler);
+  }
+
+  public synchronized void unregisterAll(String identifier) {
+    handlerTable.removeAll(identifier);
+  }
+
+  /**
+   * Lookup the responsible handler and return its result.
+   * This should be called by the RPC server when it gets a refresh request.
+   * @param identifier the resource to refresh
+   * @param args the arguments to pass on, not including the program name
+   * @throws IllegalArgumentException on invalid identifier
+   * @return the response from the appropriate handler
+   */
+  public synchronized Collection<RefreshResponse> dispatch(String identifier, String[] args) {
+    Collection<RefreshHandler> handlers = handlerTable.get(identifier);
+
+    if (handlers.size() == 0) {
+      String msg = "Identifier '" + identifier +
+        "' does not exist in RefreshRegistry. Valid options are: " +
+        Joiner.on(", ").join(handlerTable.keySet());
+
+      throw new IllegalArgumentException(msg);
+    }
+
+    ArrayList<RefreshResponse> responses =
+      new ArrayList<RefreshResponse>(handlers.size());
+
+    // Dispatch to each handler and store response
+    for(RefreshHandler handler : handlers) {
+      RefreshResponse response;
+
+      // Run the handler
+      try {
+        response = handler.handleRefresh(identifier, args);
+        if (response == null) {
+          throw new NullPointerException("Handler returned null.");
+        }
+
+        LOG.info(handlerName(handler) + " responds to '" + identifier +
+          "', says: '" + response.getMessage() + "', returns " +
+          response.getReturnCode());
+      } catch (Exception e) {
+        response = new RefreshResponse(-1, e.getLocalizedMessage());
+      }
+
+      response.setSenderName(handlerName(handler));
+      responses.add(response);
+    }
+
+    return responses;
+  }
+
+  private String handlerName(RefreshHandler h) {
+    return h.getClass().getName() + '@' + Integer.toHexString(h.hashCode());
+  }
+}
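
Putting the pieces together, a sketch of registering with the singleton registry and dispatching a refresh (the identifier string is illustrative; EchoRefreshHandler is the sketch above):

    import java.util.Collection;
    import org.apache.hadoop.ipc.RefreshHandler;
    import org.apache.hadoop.ipc.RefreshRegistry;
    import org.apache.hadoop.ipc.RefreshResponse;

    public class RefreshRegistryExample {
      public static void main(String[] args) {
        RefreshRegistry registry = RefreshRegistry.defaultRegistry();
        RefreshHandler handler = new EchoRefreshHandler();
        registry.register("org.apache.hadoop.blacklist", handler);

        // The RPC server would call dispatch() on an incoming refresh request.
        Collection<RefreshResponse> responses =
            registry.dispatch("org.apache.hadoop.blacklist", new String[] {"now"});
        for (RefreshResponse r : responses) {
          System.out.println(r); // sender, message, and exit code via toString()
        }

        // Unregister so the registry does not pin the handler in memory.
        registry.unregister("org.apache.hadoop.blacklist", handler);
      }
    }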

+ 78 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RefreshResponse.java

@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A response returned by a refresh handler for the user to see.
+ * Useful since you may want to display status to a user even though an
+ * error has not occurred.
+ */
+@InterfaceStability.Unstable
+public class RefreshResponse {
+  private int returnCode = -1;
+  private String message;
+  private String senderName;
+
+  /**
+   * Convenience method to create a response for successful refreshes.
+   * @return a RefreshResponse indicating success
+   */
+  public static RefreshResponse successResponse() {
+    return new RefreshResponse(0, "Success");
+  }
+
+  // Most RefreshHandlers will use this
+  public RefreshResponse(int returnCode, String message) {
+    this.returnCode = returnCode;
+    this.message = message;
+  }
+
+  /**
+   * Optionally set the sender of this RefreshResponse.
+   * This helps clarify things when multiple handlers respond.
+   * @param name The name of the sender
+   */
+  public void setSenderName(String name) {
+    senderName = name;
+  }
+  public String getSenderName() { return senderName; }
+
+  public int getReturnCode() { return returnCode; }
+  public void setReturnCode(int rc) { returnCode = rc; }
+
+  public void setMessage(String m) { message = m; }
+  public String getMessage() { return message; }
+
+  @Override
+  public String toString() {
+    String ret = "";
+
+    if (senderName != null) {
+      ret += senderName + ": ";
+    }
+
+    if (message != null) {
+      ret += message;
+    }
+
+    ret += " (exit " + returnCode + ")";
+    return ret;
+  }
+}

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -1221,7 +1221,7 @@ public abstract class Server {
         ugi.addTokenIdentifier(tokenId);
         return ugi;
       } else {
-        return UserGroupInformation.createRemoteUser(authorizedId);
+        return UserGroupInformation.createRemoteUser(authorizedId, authMethod);
       }
     }
 

+ 119 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolClientSideTranslatorPB.java

@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc.protocolPB;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.ipc.ProtobufHelper;
+import org.apache.hadoop.ipc.ProtocolMetaInterface;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RefreshResponse;
+import org.apache.hadoop.ipc.RpcClientUtil;
+import org.apache.hadoop.ipc.GenericRefreshProtocol;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshRequestProto;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseProto;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseCollectionProto;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+public class GenericRefreshProtocolClientSideTranslatorPB implements
+    ProtocolMetaInterface, GenericRefreshProtocol, Closeable {
+
+  /** RpcController is not used and hence is set to null. */
+  private final static RpcController NULL_CONTROLLER = null;
+  private final GenericRefreshProtocolPB rpcProxy;
+
+  public GenericRefreshProtocolClientSideTranslatorPB(
+      GenericRefreshProtocolPB rpcProxy) {
+    this.rpcProxy = rpcProxy;
+  }
+
+  @Override
+  public void close() throws IOException {
+    RPC.stopProxy(rpcProxy);
+  }
+
+  @Override
+  public Collection<RefreshResponse> refresh(String identifier, String[] args) throws IOException {
+    List<String> argList = Arrays.asList(args);
+
+    try {
+      GenericRefreshRequestProto request = GenericRefreshRequestProto.newBuilder()
+        .setIdentifier(identifier)
+        .addAllArgs(argList)
+        .build();
+
+      GenericRefreshResponseCollectionProto resp = rpcProxy.refresh(NULL_CONTROLLER, request);
+      return unpack(resp);
+    } catch (ServiceException se) {
+      throw ProtobufHelper.getRemoteException(se);
+    }
+  }
+
+  private Collection<RefreshResponse> unpack(GenericRefreshResponseCollectionProto collection) {
+    List<GenericRefreshResponseProto> responseProtos = collection.getResponsesList();
+    List<RefreshResponse> responses = new ArrayList<RefreshResponse>();
+
+    for (GenericRefreshResponseProto rp : responseProtos) {
+      RefreshResponse response = unpack(rp);
+      responses.add(response);
+    }
+
+    return responses;
+  }
+
+  private RefreshResponse unpack(GenericRefreshResponseProto proto) {
+    // The default values
+    String message = null;
+    String sender = null;
+    int returnCode = -1;
+
+    // ... that can be overridden by data from the protobuf
+    if (proto.hasUserMessage()) {
+      message = proto.getUserMessage();
+    }
+    if (proto.hasExitStatus()) {
+      returnCode = proto.getExitStatus();
+    }
+    if (proto.hasSenderName()) {
+      sender = proto.getSenderName();
+    }
+
+    // ... and put into a RefreshResponse
+    RefreshResponse response = new RefreshResponse(returnCode, message);
+    response.setSenderName(sender);
+
+    return response;
+  }
+
+  @Override
+  public boolean isMethodSupported(String methodName) throws IOException {
+    return RpcClientUtil.isMethodSupported(rpcProxy,
+      GenericRefreshProtocolPB.class,
+      RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+      RPC.getProtocolVersion(GenericRefreshProtocolPB.class),
+      methodName);
+  }
+}
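
Client-side usage is then a thin wrapper around the translator; the sketch below assumes a GenericRefreshProtocolPB RPC proxy has already been obtained (proxy construction via RPC, addresses, and security setup are omitted):

    import java.io.IOException;
    import java.util.Collection;
    import org.apache.hadoop.ipc.GenericRefreshProtocol;
    import org.apache.hadoop.ipc.RefreshResponse;
    import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolClientSideTranslatorPB;
    import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;

    public class RefreshClientSketch {
      static void refresh(GenericRefreshProtocolPB proxy) throws IOException {
        GenericRefreshProtocol client =
            new GenericRefreshProtocolClientSideTranslatorPB(proxy);
        Collection<RefreshResponse> responses =
            client.refresh("org.apache.hadoop.blacklist", new String[] {"now"});
        for (RefreshResponse r : responses) {
          System.out.println(r.getSenderName() + ": " + r.getMessage()
              + " (exit " + r.getReturnCode() + ")");
        }
      }
    }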

+ 37 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolPB.java

@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc.protocolPB;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ipc.ProtocolInfo;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshProtocolService;
+
+@KerberosInfo(
+    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
+@ProtocolInfo(
+    protocolName = "org.apache.hadoop.ipc.GenericRefreshProtocol",
+    protocolVersion = 1)
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Evolving
+public interface GenericRefreshProtocolPB extends
+  GenericRefreshProtocolService.BlockingInterface {
+}

+ 84 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/protocolPB/GenericRefreshProtocolServerSideTranslatorPB.java

@@ -0,0 +1,84 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc.protocolPB;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+
+import org.apache.hadoop.ipc.GenericRefreshProtocol;
+import org.apache.hadoop.ipc.RefreshResponse;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshRequestProto;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseProto;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshResponseCollectionProto;
+
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
+
+public class GenericRefreshProtocolServerSideTranslatorPB implements
+    GenericRefreshProtocolPB {
+
+  private final GenericRefreshProtocol impl;
+
+  public GenericRefreshProtocolServerSideTranslatorPB(
+      GenericRefreshProtocol impl) {
+    this.impl = impl;
+  }
+
+  @Override
+  public GenericRefreshResponseCollectionProto refresh(
+      RpcController controller, GenericRefreshRequestProto request)
+      throws ServiceException {
+    try {
+      List<String> argList = request.getArgsList();
+      String[] args = argList.toArray(new String[argList.size()]);
+
+      if (!request.hasIdentifier()) {
+        throw new ServiceException("Request must contain identifier");
+      }
+
+      Collection<RefreshResponse> results = impl.refresh(request.getIdentifier(), args);
+
+      return pack(results);
+    } catch (IOException e) {
+      throw new ServiceException(e);
+    }
+  }
+
+  // Convert a collection of RefreshResponse objects to a
+  // RefreshResponseCollection proto
+  private GenericRefreshResponseCollectionProto pack(
+    Collection<RefreshResponse> responses) {
+    GenericRefreshResponseCollectionProto.Builder b =
+      GenericRefreshResponseCollectionProto.newBuilder();
+
+    for (RefreshResponse response : responses) {
+      GenericRefreshResponseProto.Builder respBuilder =
+        GenericRefreshResponseProto.newBuilder();
+      respBuilder.setExitStatus(response.getReturnCode());
+      respBuilder.setUserMessage(response.getMessage());
+      respBuilder.setSenderName(response.getSenderName());
+
+      // Add to collection
+      b.addResponses(respBuilder);
+    }
+
+    return b.build();
+  }
+}

+ 7 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
 
 /**
  * An implementation of {@link GroupMappingServiceProvider} which
@@ -312,8 +313,8 @@ public class LdapGroupsMapping
     keystorePass =
         conf.get(LDAP_KEYSTORE_PASSWORD_KEY, LDAP_KEYSTORE_PASSWORD_DEFAULT);
     if (keystorePass.isEmpty()) {
-      keystorePass = extractPassword(
-        conf.get(LDAP_KEYSTORE_PASSWORD_KEY, LDAP_KEYSTORE_PASSWORD_DEFAULT));
+      keystorePass = extractPassword(conf.get(LDAP_KEYSTORE_PASSWORD_FILE_KEY,
+          LDAP_KEYSTORE_PASSWORD_FILE_DEFAULT));
     }
     
     bindUser = conf.get(BIND_USER_KEY, BIND_USER_DEFAULT);
@@ -346,18 +347,20 @@ public class LdapGroupsMapping
       return "";
     }
     
+    Reader reader = null;
     try {
       StringBuilder password = new StringBuilder();
-      Reader reader = new FileReader(pwFile);
+      reader = new FileReader(pwFile);
       int c = reader.read();
       while (c > -1) {
         password.append((char)c);
         c = reader.read();
       }
-      reader.close();
       return password.toString().trim();
     } catch (IOException ioe) {
       throw new RuntimeException("Could not read password file: " + pwFile, ioe);
+    } finally {
+      IOUtils.cleanup(LOG, reader);
     }
   }
 }
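
For reference, the same read loop written with Java 7 try-with-resources would make the explicit cleanup unnecessary; a sketch of that alternative (not what this patch does, which keeps the finally block with IOUtils.cleanup):

    import java.io.FileReader;
    import java.io.IOException;
    import java.io.Reader;

    public class PasswordFileSketch {
      static String extractPassword(String pwFile) {
        StringBuilder password = new StringBuilder();
        // The Reader is closed automatically when the try block exits.
        try (Reader reader = new FileReader(pwFile)) {
          int c;
          while ((c = reader.read()) > -1) {
            password.append((char) c);
          }
          return password.toString().trim();
        } catch (IOException ioe) {
          throw new RuntimeException("Could not read password file: " + pwFile, ioe);
        }
      }
    }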

+ 9 - 13
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java

@@ -289,12 +289,10 @@ public class SecurityUtil {
    */
   public static KerberosInfo 
   getKerberosInfo(Class<?> protocol, Configuration conf) {
-    synchronized (testProviders) {
-      for(SecurityInfo provider: testProviders) {
-        KerberosInfo result = provider.getKerberosInfo(protocol, conf);
-        if (result != null) {
-          return result;
-        }
+    for(SecurityInfo provider: testProviders) {
+      KerberosInfo result = provider.getKerberosInfo(protocol, conf);
+      if (result != null) {
+        return result;
       }
     }
     
@@ -317,13 +315,11 @@ public class SecurityUtil {
    * @return the TokenInfo or null if it has no KerberosInfo defined
    */
   public static TokenInfo getTokenInfo(Class<?> protocol, Configuration conf) {
-    synchronized (testProviders) {
-      for(SecurityInfo provider: testProviders) {
-        TokenInfo result = provider.getTokenInfo(protocol, conf);
-        if (result != null) {
-          return result;
-        }      
-      }
+    for(SecurityInfo provider: testProviders) {
+      TokenInfo result = provider.getTokenInfo(protocol, conf);
+      if (result != null) {
+        return result;
+      }      
     }
     
     synchronized (securityInfoProviders) {

+ 13 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -1157,13 +1157,25 @@ public class UserGroupInformation {
   @InterfaceAudience.Public
   @InterfaceStability.Evolving
   public static UserGroupInformation createRemoteUser(String user) {
+    return createRemoteUser(user, AuthMethod.SIMPLE);
+  }
+  
+  /**
+   * Create a user from a login name. It is intended to be used for remote
+   * users in RPC, since it won't have any credentials.
+   * @param user the full user principal name, must not be empty or null
+   * @param authMethod the authentication method for the remote user
+   * @return the UserGroupInformation for the remote user.
+   */
+  @InterfaceAudience.Public
+  @InterfaceStability.Evolving
+  public static UserGroupInformation createRemoteUser(String user, AuthMethod authMethod) {
     if (user == null || user.isEmpty()) {
       throw new IllegalArgumentException("Null user");
     }
     Subject subject = new Subject();
     subject.getPrincipals().add(new User(user));
     UserGroupInformation result = new UserGroupInformation(subject);
-    result.setAuthenticationMethod(AuthenticationMethod.SIMPLE);
+    result.setAuthenticationMethod(authMethod);
     return result;
   }
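
With the new overload, an RPC server that authenticated a caller can record the negotiated method instead of always reporting SIMPLE; a minimal sketch (the principal name is illustrative):

    import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
    import org.apache.hadoop.security.UserGroupInformation;

    public class RemoteUserSketch {
      public static void main(String[] args) {
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(
            "alice@EXAMPLE.COM", AuthMethod.KERBEROS);
        // Prints KERBEROS rather than the previously hard-coded SIMPLE.
        System.out.println(ugi.getAuthenticationMethod());
      }
    }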
 

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/TableListing.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/TableListing.java

@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hdfs.tools;
+package org.apache.hadoop.tools;
 
 import java.util.ArrayList;
 import java.util.LinkedList;
@@ -26,14 +26,14 @@ import org.apache.hadoop.classification.InterfaceAudience;
 
 /**
  * This class implements a "table listing" with column headers.
- * 
+ *
  * Example:
- * 
+ *
  * NAME   OWNER   GROUP   MODE       WEIGHT
  * pool1  andrew  andrew  rwxr-xr-x     100
  * pool2  andrew  andrew  rwxr-xr-x     100
  * pool3  andrew  andrew  rwxr-xr-x     100
- * 
+ *
  */
 @InterfaceAudience.Private
 public class TableListing {
@@ -141,14 +141,14 @@ public class TableListing {
 
     /**
      * Add a new field to the Table under construction.
-     * 
+     *
      * @param title Field title.
      * @param justification Right or left justification. Defaults to left.
      * @param wrap Width at which to auto-wrap the content of the cell.
      *        Defaults to Integer.MAX_VALUE.
      * @return This Builder object
      */
-    public Builder addField(String title, Justification justification, 
+    public Builder addField(String title, Justification justification,
         boolean wrap) {
       columns.add(new Column(title, justification, wrap));
       return this;

+ 18 - 13
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java

@@ -526,12 +526,8 @@ abstract public class Shell {
       }
       // wait for the process to finish and check the exit code
       exitCode  = process.waitFor();
-      try {
-        // make sure that the error thread exits
-        errThread.join();
-      } catch (InterruptedException ie) {
-        LOG.warn("Interrupted while reading the error stream", ie);
-      }
+      // make sure that the error thread exits
+      joinThread(errThread);
       completed.set(true);
       //the timeout thread handling
       //taken care in finally block
@@ -560,13 +556,9 @@ abstract public class Shell {
       } catch (IOException ioe) {
         LOG.warn("Error while closing the input stream", ioe);
       }
-      try {
-        if (!completed.get()) {
-          errThread.interrupt();
-          errThread.join();
-        }
-      } catch (InterruptedException ie) {
-        LOG.warn("Interrupted while joining errThread");
+      if (!completed.get()) {
+        errThread.interrupt();
+        joinThread(errThread);
       }
       try {
         InputStream stderr = process.getErrorStream();
@@ -581,6 +573,19 @@ abstract public class Shell {
     }
   }
 
+  private static void joinThread(Thread t) {
+    while (t.isAlive()) {
+      try {
+        t.join();
+      } catch (InterruptedException ie) {
+        if (LOG.isWarnEnabled()) {
+          LOG.warn("Interrupted while joining on: " + t, ie);
+        }
+        t.interrupt(); // propagate interrupt
+      }
+    }
+  }
+
   /** return an array containing the command name & its parameters */ 
   protected abstract String[] getExecString();
   

+ 4 - 3
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c

@@ -73,7 +73,10 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
   // was successful or not (as long as it was called we need to call
   // endnetgrent)
   setnetgrentCalledFlag = 1;
-#ifndef __FreeBSD__
+#if defined(__FreeBSD__) || defined(__MACH__)
+  setnetgrent(cgroup);
+  {
+#else
   if(setnetgrent(cgroup) == 1) {
 #endif
     current = NULL;
@@ -90,9 +93,7 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
         userListSize++;
       }
     }
-#ifndef __FreeBSD__
   }
-#endif
 
   //--------------------------------------------------
   // build return data (java array)

+ 61 - 0
hadoop-common-project/hadoop-common/src/main/proto/GenericRefreshProtocol.proto

@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * These .proto interfaces are private and stable.
+ * Please see http://wiki.apache.org/hadoop/Compatibility
+ * for what changes are allowed for a *stable* .proto interface.
+ */
+
+option java_package = "org.apache.hadoop.ipc.proto";
+option java_outer_classname = "GenericRefreshProtocolProtos";
+option java_generic_services = true;
+option java_generate_equals_and_hash = true;
+package hadoop.common;
+
+/**
+ *  Refresh request.
+ */
+message GenericRefreshRequestProto {
+    optional string identifier = 1;
+    repeated string args = 2;
+}
+
+/**
+ * A single response from a refresh handler.
+ */
+message GenericRefreshResponseProto {
+    optional int32 exitStatus = 1; // unix exit status to return
+    optional string userMessage = 2; // to be displayed to the user
+    optional string senderName = 3; // which handler sent this message
+}
+
+/**
+ * Collection of responses from zero or more handlers.
+ */
+message GenericRefreshResponseCollectionProto {
+    repeated GenericRefreshResponseProto responses = 1;
+}
+
+/**
+ * Protocol which is used to refresh a user-specified feature.
+ */
+service GenericRefreshProtocolService {
+  rpc refresh(GenericRefreshRequestProto)
+      returns(GenericRefreshResponseCollectionProto);
+}

+ 7 - 1
hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm

@@ -159,7 +159,7 @@ count
 
 cp
 
-   Usage: <<<hdfs dfs -cp [-f] URI [URI ...] <dest> >>>
+   Usage: <<<hdfs dfs -cp [-f] [-p | -p[topax]] URI [URI ...] <dest> >>>
 
    Copy files from source to destination. This command allows multiple sources
    as well in which case the destination must be a directory.
@@ -167,6 +167,12 @@ cp
     Options:
 
       * The -f option will overwrite the destination if it already exists.
+      
+      * The -p option will preserve file attributes [topax] (timestamps,
+        ownership, permission, ACL, XAttr). If -p is specified with no <arg>,
+        it preserves timestamps, ownership, and permission. If -pa is
+        specified, it also preserves permission, because ACL is a super-set
+        of permission.
 
    Example:
 

+ 732 - 0
hadoop-common-project/hadoop-common/src/site/apt/Metrics.apt.vm

@@ -0,0 +1,732 @@
+~~ Licensed to the Apache Software Foundation (ASF) under one or more
+~~ contributor license agreements.  See the NOTICE file distributed with
+~~ this work for additional information regarding copyright ownership.
+~~ The ASF licenses this file to You under the Apache License, Version 2.0
+~~ (the "License"); you may not use this file except in compliance with
+~~ the License.  You may obtain a copy of the License at
+~~
+~~     http://www.apache.org/licenses/LICENSE-2.0
+~~
+~~ Unless required by applicable law or agreed to in writing, software
+~~ distributed under the License is distributed on an "AS IS" BASIS,
+~~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+~~ See the License for the specific language governing permissions and
+~~ limitations under the License.
+
+  ---
+  Metrics Guide
+  ---
+  ---
+  ${maven.build.timestamp}
+
+%{toc}
+
+Overview
+
+  Metrics are statistical information exposed by Hadoop daemons,
+  used for monitoring, performance tuning, and debugging.
+  Many metrics are available by default,
+  and they are very useful for troubleshooting.
+  This page details the available metrics.
+
+  Each section below describes one context into which metrics are grouped.
+
+  The documentation of Metrics 2.0 framework is
+  {{{../../api/org/apache/hadoop/metrics2/package-summary.html}here}}.
+
+jvm context
+
+* JvmMetrics
+
+  Each metrics record contains tags such as ProcessName, SessionID
+  and Hostname as additional information along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<MemNonHeapUsedM>>> | Current non-heap memory used in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemNonHeapCommittedM>>> | Current non-heap memory committed in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemNonHeapMaxM>>> | Max non-heap memory size in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemHeapUsedM>>> | Current heap memory used in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemHeapCommittedM>>> | Current heap memory committed in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemHeapMaxM>>> | Max heap memory size in MB
+*-------------------------------------+--------------------------------------+
+|<<<MemMaxM>>> | Max memory size in MB
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsNew>>> | Current number of NEW threads
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsRunnable>>> | Current number of RUNNABLE threads
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsBlocked>>> | Current number of BLOCKED threads
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsWaiting>>> | Current number of WAITING threads
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsTimedWaiting>>> | Current number of TIMED_WAITING threads
+*-------------------------------------+--------------------------------------+
+|<<<ThreadsTerminated>>> | Current number of TERMINATED threads
+*-------------------------------------+--------------------------------------+
+|<<<GcInfo>>>  | Total GC count and GC time in msec, grouped by the kind of GC. \
+               | e.g. GcCountPS Scavenge=6, GCTimeMillisPS Scavenge=40,
+               | GCCountPS MarkSweep=0, GCTimeMillisPS MarkSweep=0
+*-------------------------------------+--------------------------------------+
+|<<<GcCount>>> | Total GC count
+*-------------------------------------+--------------------------------------+
+|<<<GcTimeMillis>>> | Total GC time in msec
+*-------------------------------------+--------------------------------------+
+|<<<LogFatal>>> | Total number of FATAL logs
+*-------------------------------------+--------------------------------------+
+|<<<LogError>>> | Total number of ERROR logs
+*-------------------------------------+--------------------------------------+
+|<<<LogWarn>>> | Total number of WARN logs
+*-------------------------------------+--------------------------------------+
+|<<<LogInfo>>> | Total number of INFO logs
+*-------------------------------------+--------------------------------------+
+
+rpc context
+
+* rpc
+
+  Each metrics record contains tags such as Hostname
+  and port (number to which server is bound)
+  as additional information along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<ReceivedBytes>>> | Total number of received bytes
+*-------------------------------------+--------------------------------------+
+|<<<SentBytes>>> | Total number of sent bytes
+*-------------------------------------+--------------------------------------+
+|<<<RpcQueueTimeNumOps>>> | Total number of RPC calls
+*-------------------------------------+--------------------------------------+
+|<<<RpcQueueTimeAvgTime>>> | Average queue time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<RpcProcessingTimeNumOps>>> | Total number of RPC calls (same as
+                               | RpcQueueTimeNumOps)
+*-------------------------------------+--------------------------------------+
+|<<<RpcProcessingAvgTime>>> | Average processing time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<RpcAuthenticationFailures>>> | Total number of authentication failures
+*-------------------------------------+--------------------------------------+
+|<<<RpcAuthenticationSuccesses>>> | Total number of authentication successes
+*-------------------------------------+--------------------------------------+
+|<<<RpcAuthorizationFailures>>> | Total number of authorization failures
+*-------------------------------------+--------------------------------------+
+|<<<RpcAuthorizationSuccesses>>> | Total number of authorization successes
+*-------------------------------------+--------------------------------------+
+|<<<NumOpenConnections>>> | Current number of open connections
+*-------------------------------------+--------------------------------------+
+|<<<CallQueueLength>>> | Current length of the call queue
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<sNumOps>>> | Shows total number of RPC calls
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<s50thPercentileLatency>>> |
+| | Shows the 50th percentile of RPC queue time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<s75thPercentileLatency>>> |
+| | Shows the 75th percentile of RPC queue time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<s90thPercentileLatency>>> |
+| | Shows the 90th percentile of RPC queue time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<s95thPercentileLatency>>> |
+| | Shows the 95th percentile of RPC queue time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcQueueTime>>><num><<<s99thPercentileLatency>>> |
+| | Shows the 99th percentile of RPC queue time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<sNumOps>>> | Shows total number of RPC calls
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<s50thPercentileLatency>>> |
+| | Shows the 50th percentile of RPC processing time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<s75thPercentileLatency>>> |
+| | Shows the 75th percentile of RPC processing time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<s90thPercentileLatency>>> |
+| | Shows the 90th percentile of RPC processing time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<s95thPercentileLatency>>> |
+| | Shows the 95th percentile of RPC processing time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<rpcProcessingTime>>><num><<<s99thPercentileLatency>>> |
+| | Shows the 99th percentile of RPC processing time in milliseconds
+| | (<num> seconds granularity) if <<<rpc.metrics.quantile.enable>>> is set to
+| | true. <num> is specified by <<<rpc.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+
+* RetryCache/NameNodeRetryCache
+
+  RetryCache metrics are useful for monitoring NameNode failover.
+  Each metrics record contains a Hostname tag.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<CacheHit>>> | Total number of RetryCache hits
+*-------------------------------------+--------------------------------------+
+|<<<CacheCleared>>> | Total number of RetryCache entries cleared
+*-------------------------------------+--------------------------------------+
+|<<<CacheUpdated>>> | Total number of RetryCache entries updated
+*-------------------------------------+--------------------------------------+
+
+rpcdetailed context
+
+  Metrics in the rpcdetailed context are exposed by the RPC layer in a
+  unified manner. Two metrics are exposed for each RPC, based on its name.
+  The metric named "(RPC method name)NumOps" indicates the total number of
+  method calls, and the metric named "(RPC method name)AvgTime" shows the
+  average turnaround time for method calls in milliseconds.
+
+* rpcdetailed
+
+  Each metrics record contains tags such as Hostname
+  and port (number to which server is bound)
+  as additional information along with metrics.
+
+  Metrics for RPC methods that are never called are not included
+  in the metrics record.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<methodname><<<NumOps>>> | Total number of times the method is called
+*-------------------------------------+--------------------------------------+
+|<methodname><<<AvgTime>>> | Average turnaround time of the method in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+
+dfs context
+
+* namenode
+
+  Each metrics record contains tags such as ProcessName, SessionId,
+  and Hostname as additional information along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<CreateFileOps>>> | Total number of files created
+*-------------------------------------+--------------------------------------+
+|<<<FilesCreated>>> | Total number of files and directories created by create
+                    | or mkdir operations
+*-------------------------------------+--------------------------------------+
+|<<<FilesAppended>>> | Total number of files appended
+*-------------------------------------+--------------------------------------+
+|<<<GetBlockLocations>>> | Total number of getBlockLocations operations
+*-------------------------------------+--------------------------------------+
+|<<<FilesRenamed>>> | Total number of rename <<operations>> (NOT number of
+                    | files/dirs renamed)
+*-------------------------------------+--------------------------------------+
+|<<<GetListingOps>>> | Total number of directory listing operations
+*-------------------------------------+--------------------------------------+
+|<<<DeleteFileOps>>> | Total number of delete operations
+*-------------------------------------+--------------------------------------+
+|<<<FilesDeleted>>> | Total number of files and directories deleted by delete
+                    | or rename operations
+*-------------------------------------+--------------------------------------+
+|<<<FileInfoOps>>> | Total number of getFileInfo and getLinkFileInfo
+                   | operations
+*-------------------------------------+--------------------------------------+
+|<<<AddBlockOps>>> | Total number of addBlock operations succeeded
+*-------------------------------------+--------------------------------------+
+|<<<GetAdditionalDatanodeOps>>> | Total number of getAdditionalDatanode
+                                | operations
+*-------------------------------------+--------------------------------------+
+|<<<CreateSymlinkOps>>> | Total number of createSymlink operations
+*-------------------------------------+--------------------------------------+
+|<<<GetLinkTargetOps>>> | Total number of getLinkTarget operations
+*-------------------------------------+--------------------------------------+
+|<<<FilesInGetListingOps>>> | Total number of files and directories listed by
+                            | directory listing operations
+*-------------------------------------+--------------------------------------+
+|<<<AllowSnapshotOps>>> | Total number of allowSnapshot operations
+*-------------------------------------+--------------------------------------+
+|<<<DisallowSnapshotOps>>> | Total number of disallowSnapshot operations
+*-------------------------------------+--------------------------------------+
+|<<<CreateSnapshotOps>>> | Total number of createSnapshot operations
+*-------------------------------------+--------------------------------------+
+|<<<DeleteSnapshotOps>>> | Total number of deleteSnapshot operations
+*-------------------------------------+--------------------------------------+
+|<<<RenameSnapshotOps>>> | Total number of renameSnapshot operations
+*-------------------------------------+--------------------------------------+
+|<<<ListSnapshottableDirOps>>> | Total number of snapshottableDirectoryStatus
+                               | operations
+*-------------------------------------+--------------------------------------+
+|<<<SnapshotDiffReportOps>>> | Total number of getSnapshotDiffReport
+                             | operations
+*-------------------------------------+--------------------------------------+
+|<<<TransactionsNumOps>>> | Total number of Journal transactions
+*-------------------------------------+--------------------------------------+
+|<<<TransactionsAvgTime>>> | Average time of Journal transactions in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<SyncsNumOps>>> | Total number of Journal syncs
+*-------------------------------------+--------------------------------------+
+|<<<SyncsAvgTime>>> | Average time of Journal syncs in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<TransactionsBatchedInSync>>> | Total number of Journal transactions batched
+                                 | in sync
+*-------------------------------------+--------------------------------------+
+|<<<BlockReportNumOps>>> | Total number of block reports processed from
+                         | DataNodes
+*-------------------------------------+--------------------------------------+
+|<<<BlockReportAvgTime>>> | Average time of processing block reports in
+                          | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<CacheReportNumOps>>> | Total number of cache reports processed from
+                         | DataNodes
+*-------------------------------------+--------------------------------------+
+|<<<CacheReportAvgTime>>> | Average time of processing cache reports in
+                          | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<SafeModeTime>>> | The interval in milliseconds between FSNamesystem
+                    | startup and the last time safemode was left. \
+                    | (sometimes not equal to the time spent in safemode,
+                    | see {{{https://issues.apache.org/jira/browse/HDFS-5156}HDFS-5156}})
+*-------------------------------------+--------------------------------------+
+|<<<FsImageLoadTime>>> | Time loading FS Image at startup in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<GetEditNumOps>>> | Total number of edits downloads from SecondaryNameNode
+*-------------------------------------+--------------------------------------+
+|<<<GetEditAvgTime>>> | Average edits download time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<GetImageNumOps>>> |Total number of fsimage downloads from SecondaryNameNode
+*-------------------------------------+--------------------------------------+
+|<<<GetImageAvgTime>>> | Average fsimage download time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<PutImageNumOps>>> | Total number of fsimage uploads to SecondaryNameNode
+*-------------------------------------+--------------------------------------+
+|<<<PutImageAvgTime>>> | Average fsimage upload time in milliseconds
+*-------------------------------------+--------------------------------------+
+
+* FSNamesystem
+
+  Each metrics record contains tags such as HAState and Hostname
+  as additional information along with metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<MissingBlocks>>> | Current number of missing blocks
+*-------------------------------------+--------------------------------------+
+|<<<ExpiredHeartbeats>>> | Total number of expired heartbeats
+*-------------------------------------+--------------------------------------+
+|<<<TransactionsSinceLastCheckpoint>>> | Total number of transactions since
+                                       | last checkpoint
+*-------------------------------------+--------------------------------------+
+|<<<TransactionsSinceLastLogRoll>>> | Total number of transactions since last
+                                    | edit log roll
+*-------------------------------------+--------------------------------------+
+|<<<LastWrittenTransactionId>>> | Last transaction ID written to the edit log
+*-------------------------------------+--------------------------------------+
+|<<<LastCheckpointTime>>> | Time in milliseconds since epoch of last checkpoint
+*-------------------------------------+--------------------------------------+
+|<<<CapacityTotal>>> | Current raw capacity of DataNodes in bytes
+*-------------------------------------+--------------------------------------+
+|<<<CapacityTotalGB>>> | Current raw capacity of DataNodes in GB
+*-------------------------------------+--------------------------------------+
+|<<<CapacityUsed>>> | Current used capacity across all DataNodes in bytes
+*-------------------------------------+--------------------------------------+
+|<<<CapacityUsedGB>>> | Current used capacity across all DataNodes in GB
+*-------------------------------------+--------------------------------------+
+|<<<CapacityRemaining>>> | Current remaining capacity in bytes
+*-------------------------------------+--------------------------------------+
+|<<<CapacityRemainingGB>>> | Current remaining capacity in GB
+*-------------------------------------+--------------------------------------+
+|<<<CapacityUsedNonDFS>>> | Current space used by DataNodes for non DFS
+                          | purposes in bytes
+*-------------------------------------+--------------------------------------+
+|<<<TotalLoad>>> | Current number of connections
+*-------------------------------------+--------------------------------------+
+|<<<SnapshottableDirectories>>> | Current number of snapshottable directories
+*-------------------------------------+--------------------------------------+
+|<<<Snapshots>>> | Current number of snapshots
+*-------------------------------------+--------------------------------------+
+|<<<BlocksTotal>>> | Current number of allocated blocks in the system
+*-------------------------------------+--------------------------------------+
+|<<<FilesTotal>>> | Current number of files and directories
+*-------------------------------------+--------------------------------------+
+|<<<PendingReplicationBlocks>>> | Current number of blocks pending to be
+                                | replicated
+*-------------------------------------+--------------------------------------+
+|<<<UnderReplicatedBlocks>>> | Current number of under-replicated blocks
+*-------------------------------------+--------------------------------------+
+|<<<CorruptBlocks>>> | Current number of blocks with corrupt replicas.
+*-------------------------------------+--------------------------------------+
+|<<<ScheduledReplicationBlocks>>> | Current number of blocks scheduled for
+                                  | replication
+*-------------------------------------+--------------------------------------+
+|<<<PendingDeletionBlocks>>> | Current number of blocks pending deletion
+*-------------------------------------+--------------------------------------+
+|<<<ExcessBlocks>>> | Current number of excess blocks
+*-------------------------------------+--------------------------------------+
+|<<<PostponedMisreplicatedBlocks>>> | (HA-only) Current number of blocks whose
+                                    | replication is postponed
+*-------------------------------------+--------------------------------------+
+|<<<PendingDataNodeMessageCount>>> | (HA-only) Current number of pending
+                                   | block-related messages for later
+                                   | processing in the standby NameNode
+*-------------------------------------+--------------------------------------+
+|<<<MillisSinceLastLoadedEdits>>> | (HA-only) Time in milliseconds since the
+                                  | standby NameNode last loaded the edit
+                                  | log. Set to 0 on the active NameNode
+*-------------------------------------+--------------------------------------+
+|<<<BlockCapacity>>> | Current block capacity
+*-------------------------------------+--------------------------------------+
+|<<<StaleDataNodes>>> | Current number of DataNodes marked stale due to delayed
+                      | heartbeat
+*-------------------------------------+--------------------------------------+
+|<<<TotalFiles>>> |Current number of files and directories (same as FilesTotal)
+*-------------------------------------+--------------------------------------+
+
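+  The FSNamesystem gauges above are all published through the NameNode's JMX
+  servlet, so they can be scraped without any Hadoop client libraries. A
+  minimal sketch in Java follows; the host and port (namenode:50070) and the
+  qry filter value are assumptions to adapt to your deployment.
+
+    import java.io.BufferedReader;
+    import java.io.InputStreamReader;
+    import java.net.HttpURLConnection;
+    import java.net.URL;
+
+    public class FsNamesystemMetricsProbe {
+      public static void main(String[] args) throws Exception {
+        // The /jmx servlet accepts a qry parameter selecting a single MBean.
+        URL url = new URL("http://namenode:50070/jmx"
+            + "?qry=Hadoop:service=NameNode,name=FSNamesystem");
+        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+        try (BufferedReader in = new BufferedReader(
+            new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
+          String line;
+          while ((line = in.readLine()) != null) {
+            // Crude scan for two of the gauges documented above; a real
+            // client would parse the JSON response instead.
+            if (line.contains("CapacityRemaining")
+                || line.contains("UnderReplicatedBlocks")) {
+              System.out.println(line.trim());
+            }
+          }
+        } finally {
+          conn.disconnect();
+        }
+      }
+    }
+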
+* JournalNode
+
+  The server-side metrics for a journal from the JournalNode's perspective.
+  Each metrics record contains a Hostname tag as additional information
+  along with the metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60sNumOps>>> | Number of sync operations (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60s50thPercentileLatencyMicros>>> | The 50th percentile of sync
+| | latency in microseconds (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60s75thPercentileLatencyMicros>>> | The 75th percentile of sync
+| | latency in microseconds (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60s90thPercentileLatencyMicros>>> | The 90th percentile of sync
+| | latency in microseconds (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60s95thPercentileLatencyMicros>>> | The 95th percentile of sync
+| | latency in microseconds (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs60s99thPercentileLatencyMicros>>> | The 99th percentile of sync
+| | latency in microseconds (1 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300sNumOps>>> | Number of sync operations (5 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300s50thPercentileLatencyMicros>>> | The 50th percentile of sync
+| | latency in microseconds (5 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300s75thPercentileLatencyMicros>>> | The 75th percentile of sync
+| | latency in microseconds (5 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300s90thPercentileLatencyMicros>>> | The 90th percentile of sync
+| | latency in microseconds (5 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300s95thPercentileLatencyMicros>>> | The 95th percentile of sync
+| | latency in microseconds (5 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs300s99thPercentileLatencyMicros>>> | The 99th percentile of sync
+| | latency in microseconds (5 minute granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600sNumOps>>> | Number of sync operations (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600s50thPercentileLatencyMicros>>> | The 50th percentile of sync
+| | latency in microseconds (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600s75thPercentileLatencyMicros>>> | The 75th percentile of sync
+| | latency in microseconds (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600s90thPercentileLatencyMicros>>> | The 90th percentile of sync
+| | latency in microseconds (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600s95thPercentileLatencyMicros>>> | The 95th percentile of sync
+| | latency in microseconds (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<Syncs3600s99thPercentileLatencyMicros>>> | The 99th percentile of sync
+| | latency in microseconds (1 hour granularity)
+*-------------------------------------+--------------------------------------+
+|<<<BatchesWritten>>> | Total number of batches written since startup
+*-------------------------------------+--------------------------------------+
+|<<<TxnsWritten>>> | Total number of transactions written since startup
+*-------------------------------------+--------------------------------------+
+|<<<BytesWritten>>> | Total number of bytes written since startup
+*-------------------------------------+--------------------------------------+
+|<<<BatchesWrittenWhileLagging>>> | Total number of batches written where this
+| | node was lagging
+*-------------------------------------+--------------------------------------+
+|<<<LastWriterEpoch>>> | Current writer's epoch number
+*-------------------------------------+--------------------------------------+
+|<<<CurrentLagTxns>>> | The number of transactions that this JournalNode is
+| | lagging
+*-------------------------------------+--------------------------------------+
+|<<<LastWrittenTxId>>> | The highest transaction id stored on this JournalNode
+*-------------------------------------+--------------------------------------+
+|<<<LastPromisedEpoch>>> | The last epoch for which this node promised not to
+| | accept any writer with a lower epoch, or 0 if no promise has been made
+*-------------------------------------+--------------------------------------+
+
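+  The same /jmx servlet runs on the JournalNode (default HTTP port 8480), so
+  the sync-latency percentiles above can be sampled the same way. A sketch
+  that simply scans the JSON dump for the 1-minute window; the journalnode
+  hostname is a placeholder.
+
+    import java.io.InputStream;
+    import java.net.URL;
+    import java.util.Scanner;
+
+    public class JournalSyncLatencyProbe {
+      public static void main(String[] args) throws Exception {
+        // Dump the full JMX snapshot and keep only the Syncs60s* lines,
+        // e.g. Syncs60s99thPercentileLatencyMicros.
+        URL url = new URL("http://journalnode:8480/jmx");
+        try (InputStream in = url.openStream();
+             Scanner sc = new Scanner(in, "UTF-8")) {
+          while (sc.hasNextLine()) {
+            String line = sc.nextLine();
+            if (line.contains("Syncs60s")) {
+              System.out.println(line.trim());
+            }
+          }
+        }
+      }
+    }
+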
+* DataNode
+
+  Each metrics record contains tags such as SessionId and Hostname
+  as additional information along with the metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<BytesWritten>>> | Total number of bytes written to DataNode
+*-------------------------------------+--------------------------------------+
+|<<<BytesRead>>> | Total number of bytes read from DataNode
+*-------------------------------------+--------------------------------------+
+|<<<BlocksWritten>>> | Total number of blocks written to DataNode
+*-------------------------------------+--------------------------------------+
+|<<<BlocksRead>>> | Total number of blocks read from DataNode
+*-------------------------------------+--------------------------------------+
+|<<<BlocksReplicated>>> | Total number of blocks replicated
+*-------------------------------------+--------------------------------------+
+|<<<BlocksRemoved>>> | Total number of blocks removed
+*-------------------------------------+--------------------------------------+
+|<<<BlocksVerified>>> | Total number of blocks verified
+*-------------------------------------+--------------------------------------+
+|<<<BlockVerificationFailures>>> | Total number of block verification failures
+*-------------------------------------+--------------------------------------+
+|<<<BlocksCached>>> | Total number of blocks cached
+*-------------------------------------+--------------------------------------+
+|<<<BlocksUncached>>> | Total number of blocks uncached
+*-------------------------------------+--------------------------------------+
+|<<<ReadsFromLocalClient>>> | Total number of read operations from local
+                            | clients
+*-------------------------------------+--------------------------------------+
+|<<<ReadsFromRemoteClient>>> | Total number of read operations from remote
+                             | clients
+*-------------------------------------+--------------------------------------+
+|<<<WritesFromLocalClient>>> | Total number of write operations from local
+                             | clients
+*-------------------------------------+--------------------------------------+
+|<<<WritesFromRemoteClient>>> | Total number of write operations from remote
+                              | clients
+*-------------------------------------+--------------------------------------+
+|<<<BlocksGetLocalPathInfo>>> | Total number of operations to get local path
+                              | names of blocks
+*-------------------------------------+--------------------------------------+
+|<<<FsyncCount>>> | Total number of fsync operations
+*-------------------------------------+--------------------------------------+
+|<<<VolumeFailures>>> | Total number of volume failures
+*-------------------------------------+--------------------------------------+
+|<<<ReadBlockOpNumOps>>> | Total number of read operations
+*-------------------------------------+--------------------------------------+
+|<<<ReadBlockOpAvgTime>>> | Average time of read operations in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<WriteBlockOpNumOps>>> | Total number of write operations
+*-------------------------------------+--------------------------------------+
+|<<<WriteBlockOpAvgTime>>> | Average time of write operations in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<BlockChecksumOpNumOps>>> | Total number of blockChecksum operations
+*-------------------------------------+--------------------------------------+
+|<<<BlockChecksumOpAvgTime>>> | Average time of blockChecksum operations in
+                              | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<CopyBlockOpNumOps>>> | Total number of block copy operations
+*-------------------------------------+--------------------------------------+
+|<<<CopyBlockOpAvgTime>>> | Average time of block copy operations in
+                          | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<ReplaceBlockOpNumOps>>> | Total number of block replace operations
+*-------------------------------------+--------------------------------------+
+|<<<ReplaceBlockOpAvgTime>>> | Average time of block replace operations in
+                             | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<HeartbeatsNumOps>>> | Total number of heartbeats
+*-------------------------------------+--------------------------------------+
+|<<<HeartbeatsAvgTime>>> | Average heartbeat time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<BlockReportsNumOps>>> | Total number of block report operations
+*-------------------------------------+--------------------------------------+
+|<<<BlockReportsAvgTime>>> | Average time of block report operations in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<CacheReportsNumOps>>> | Total number of cache report operations
+*-------------------------------------+--------------------------------------+
+|<<<CacheReportsAvgTime>>> | Average time of cache report operations in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<PacketAckRoundTripTimeNanosNumOps>>> | Total number of ack round trips
+*-------------------------------------+--------------------------------------+
+|<<<PacketAckRoundTripTimeNanosAvgTime>>> | Average time from ack send to
+| | receive, minus the downstream ack time, in nanoseconds
+*-------------------------------------+--------------------------------------+
+|<<<FlushNanosNumOps>>> | Total number of flushes
+*-------------------------------------+--------------------------------------+
+|<<<FlushNanosAvgTime>>> | Average flush time in nanoseconds
+*-------------------------------------+--------------------------------------+
+|<<<FsyncNanosNumOps>>> | Total number of fsync operations
+*-------------------------------------+--------------------------------------+
+|<<<FsyncNanosAvgTime>>> | Average fsync time in nanoseconds
+*-------------------------------------+--------------------------------------+
+|<<<SendDataPacketBlockedOnNetworkNanosNumOps>>> | Total number of packets sent
+*-------------------------------------+--------------------------------------+
+|<<<SendDataPacketBlockedOnNetworkNanosAvgTime>>> | Average time blocked on the
+| | network while sending packets, in nanoseconds
+*-------------------------------------+--------------------------------------+
+|<<<SendDataPacketTransferNanosNumOps>>> | Total number of packets sent
+*-------------------------------------+--------------------------------------+
+|<<<SendDataPacketTransferNanosAvgTime>>> | Average transfer time of sent
+                                          | packets in nanoseconds
+*-------------------------------------+--------------------------------------+
+
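+  Each <metric>NumOps / <metric>AvgTime pair above comes from a rate metric:
+  AvgTime is the mean over the most recent snapshot interval, not since
+  startup. A hedged sketch of estimating total time spent in an operation
+  from two successive samples (the numbers in main are made up):
+
+    public class RateMetricDelta {
+      /**
+       * Rough time spent in an operation between two metric snapshots,
+       * given the NumOps counter before and after plus the latest AvgTime
+       * (in milliseconds for the *Op metrics above).
+       */
+      static double timeSpentMillis(long numOpsBefore, long numOpsAfter,
+                                    double avgTimeAfterMillis) {
+        long ops = numOpsAfter - numOpsBefore;  // operations in the window
+        // Approximate: AvgTime only describes the last interval.
+        return ops * avgTimeAfterMillis;
+      }
+
+      public static void main(String[] args) {
+        // e.g. WriteBlockOpNumOps went 1000 -> 1250 while
+        // WriteBlockOpAvgTime reported 12.5 ms.
+        System.out.printf("~%.0f ms spent writing blocks%n",
+            timeSpentMillis(1000, 1250, 12.5));
+      }
+    }
+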
+ugi context
+
+* UgiMetrics
+
+  UgiMetrics covers user and group information operations such as
+  Kerberos logins and group resolutions.
+  Each metrics record contains a Hostname tag as additional information
+  along with the metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<LoginSuccessNumOps>>> | Total number of successful kerberos logins
+*-------------------------------------+--------------------------------------+
+|<<<LoginSuccessAvgTime>>> | Average time for successful kerberos logins in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<LoginFailureNumOps>>> | Total number of failed kerberos logins
+*-------------------------------------+--------------------------------------+
+|<<<LoginFailureAvgTime>>> | Average time for failed kerberos logins in
+                           | milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<getGroupsNumOps>>> | Total number of group resolutions
+*-------------------------------------+--------------------------------------+
+|<<<getGroupsAvgTime>>> | Average time for group resolution in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<sNumOps>>> |
+| | Total number of group resolutions (<num> seconds granularity). <num> is
+| | specified by <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<s50thPercentileLatency>>> |
+| | Shows the 50th percentile of group resolution time in milliseconds
+| | (<num> seconds granularity). <num> is specified by
+| | <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<s75thPercentileLatency>>> |
+| | Shows the 75th percentile of group resolution time in milliseconds
+| | (<num> seconds granularity). <num> is specified by
+| | <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<s90thPercentileLatency>>> |
+| | Shows the 90th percentile of group resolution time in milliseconds
+| | (<num> seconds granularity). <num> is specified by
+| | <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<s95thPercentileLatency>>> |
+| | Shows the 95th percentile of group resolution time in milliseconds
+| | (<num> seconds granularity). <num> is specified by
+| | <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+|<<<getGroups>>><num><<<s99thPercentileLatency>>> |
+| | Shows the 99th percentile of group resolution time in milliseconds
+| | (<num> seconds granularity). <num> is specified by
+| | <<<hadoop.user.group.metrics.percentiles.intervals>>>.
+*-------------------------------------+--------------------------------------+
+
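+  The getGroups<num>s* quantile metrics above exist only for the intervals
+  listed in <<<hadoop.user.group.metrics.percentiles.intervals>>>, so they
+  are off by default. A minimal sketch of enabling 60-second and 5-minute
+  windows programmatically; setting the same key in core-site.xml is
+  equivalent.
+
+    import org.apache.hadoop.conf.Configuration;
+
+    public class UgiPercentilesConfig {
+      public static void main(String[] args) {
+        Configuration conf = new Configuration();
+        // Enables getGroups60s* and getGroups300s* quantile metrics.
+        conf.set("hadoop.user.group.metrics.percentiles.intervals", "60,300");
+        System.out.println(
+            conf.get("hadoop.user.group.metrics.percentiles.intervals"));
+      }
+    }
+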
+metricssystem context
+
+* MetricsSystem
+
+  MetricsSystem shows statistics for metrics snapshot and publish operations.
+  Each metrics record contains a Hostname tag as additional information
+  along with the metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<NumActiveSources>>> | Current number of active metrics sources
+*-------------------------------------+--------------------------------------+
+|<<<NumAllSources>>> | Total number of metrics sources
+*-------------------------------------+--------------------------------------+
+|<<<NumActiveSinks>>> | Current number of active sinks
+*-------------------------------------+--------------------------------------+
+|<<<NumAllSinks>>> | Total number of sinks \
+                   | (BUT usually less than <<<NumActiveSinks>>>,
+                   | see {{{https://issues.apache.org/jira/browse/HADOOP-9946}HADOOP-9946}})
+*-------------------------------------+--------------------------------------+
+|<<<SnapshotNumOps>>> | Total number of operations to snapshot statistics from
+                      | a metrics source
+*-------------------------------------+--------------------------------------+
+|<<<SnapshotAvgTime>>> | Average time in milliseconds to snapshot statistics
+                       | from a metrics source
+*-------------------------------------+--------------------------------------+
+|<<<PublishNumOps>>> | Total number of operations to publish statistics to a
+                     | sink
+*-------------------------------------+--------------------------------------+
+|<<<PublishAvgTime>>> | Average time in milliseconds to publish statistics to
+                      | a sink
+*-------------------------------------+--------------------------------------+
+|<<<DroppedPubAll>>> | Total number of dropped publishes
+*-------------------------------------+--------------------------------------+
+|<<<Sink_>>><instance><<<NumOps>>> | Total number of sink operations for the
+                                   | <instance>
+*-------------------------------------+--------------------------------------+
+|<<<Sink_>>><instance><<<AvgTime>>> | Average time in milliseconds of sink
+                                    | operations for the <instance>
+*-------------------------------------+--------------------------------------+
+|<<<Sink_>>><instance><<<Dropped>>> | Total number of dropped sink operations
+                                    | for the <instance>
+*-------------------------------------+--------------------------------------+
+|<<<Sink_>>><instance><<<Qsize>>> | Current queue length of sink operations \
+                                  | (BUT always set to 0 because nothing
+                                  | increments this metric, see
+                                  | {{{https://issues.apache.org/jira/browse/HADOOP-9941}HADOOP-9941}})
+*-------------------------------------+--------------------------------------+
+
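+  Inside the same JVM as a Hadoop daemon (or over a remote JMX connector),
+  these counters can also be read directly as MBean attributes. A sketch
+  using only the JDK; the ObjectName below assumes a NameNode process and
+  should be checked against the daemon's actual /jmx output.
+
+    import java.lang.management.ManagementFactory;
+    import javax.management.MBeanServer;
+    import javax.management.ObjectName;
+
+    public class MetricsSystemStatsProbe {
+      public static void main(String[] args) throws Exception {
+        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+        // Assumed bean name; verify against /jmx before relying on it.
+        ObjectName stats = new ObjectName(
+            "Hadoop:service=NameNode,name=MetricsSystem,sub=Stats");
+        System.out.println("active sources = "
+            + mbs.getAttribute(stats, "NumActiveSources"));
+        System.out.println("dropped publishes = "
+            + mbs.getAttribute(stats, "DroppedPubAll"));
+      }
+    }
+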
+default context
+
+* StartupProgress
+
+  StartupProgress metrics report statistics about NameNode startup.
+  Four metrics are exposed for each startup phase, based on its name.
+  The startup <phase>s are <<<LoadingFsImage>>>, <<<LoadingEdits>>>,
+  <<<SavingCheckpoint>>>, and <<<SafeMode>>>.
+  Each metrics record contains a Hostname tag as additional information
+  along with the metrics.
+
+*-------------------------------------+--------------------------------------+
+|| Name                               || Description
+*-------------------------------------+--------------------------------------+
+|<<<ElapsedTime>>> | Total elapsed time in milliseconds
+*-------------------------------------+--------------------------------------+
+|<<<PercentComplete>>> | Current fraction of NameNode startup completed \
+                       | (the maximum value is 1.0, not 100)
+*-------------------------------------+--------------------------------------+
+|<phase><<<Count>>> | Total number of steps completed in the phase
+*-------------------------------------+--------------------------------------+
+|<phase><<<ElapsedTime>>> | Total elapsed time in the phase in milliseconds
+*-------------------------------------+--------------------------------------+
+|<phase><<<Total>>> | Total number of steps in the phase
+*-------------------------------------+--------------------------------------+
+|<phase><<<PercentComplete>>> | Current fraction of the phase completed \
+                              | (the maximum value is 1.0, not 100)
+*-------------------------------------+--------------------------------------+
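+
+  Because the PercentComplete gauges top out at 1.0 rather than 100,
+  dashboards usually rescale them before display. A trivial sketch of the
+  conversion:
+
+    public class StartupProgressFormat {
+      // PercentComplete is reported as a fraction in [0, 1.0], not 0-100.
+      static String asPercent(double percentComplete) {
+        return String.format("%.1f%%", percentComplete * 100.0);
+      }
+
+      public static void main(String[] args) {
+        System.out.println(asPercent(0.874));  // prints 87.4%
+      }
+    }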

+ 65 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestWritable.java

@@ -23,6 +23,7 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Random;
 
+import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.ReflectionUtils;
 
@@ -30,6 +31,11 @@ import junit.framework.TestCase;
 
 /** Unit tests for Writable. */
 public class TestWritable extends TestCase {
+  private static final String TEST_CONFIG_PARAM = "frob.test";
+  private static final String TEST_CONFIG_VALUE = "test";
+  private static final String TEST_WRITABLE_CONFIG_PARAM = "test.writable";
+  private static final String TEST_WRITABLE_CONFIG_VALUE = TEST_CONFIG_VALUE;
+
   public TestWritable(String name) { super(name); }
 
   /** Example class used in test cases below. */
@@ -64,6 +70,25 @@ public class TestWritable extends TestCase {
     }
   }
 
+  public static class SimpleWritableComparable extends SimpleWritable
+      implements WritableComparable<SimpleWritableComparable>, Configurable {
+    private Configuration conf;
+
+    public SimpleWritableComparable() {}
+
+    public void setConf(Configuration conf) {
+      this.conf = conf;
+    }
+
+    public Configuration getConf() {
+      return this.conf;
+    }
+
+    public int compareTo(SimpleWritableComparable o) {
+      return this.state - o.state;
+    }
+  }
+
   /** Test 1: Check that SimpleWritable. */
   public void testSimpleWritable() throws Exception {
     testWritable(new SimpleWritable());
@@ -121,9 +146,34 @@ public class TestWritable extends TestCase {
     @Override public int compareTo(Frob o) { return 0; }
   }
 
-  /** Test that comparator is defined. */
+  /** Test that comparator is defined and configured. */
   public static void testGetComparator() throws Exception {
-    assert(WritableComparator.get(Frob.class) instanceof FrobComparator);
+    Configuration conf = new Configuration();
+
+    // Without conf.
+    WritableComparator frobComparator = WritableComparator.get(Frob.class);
+    assert(frobComparator instanceof FrobComparator);
+    assertNotNull(frobComparator.getConf());
+    assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));
+
+    // With conf.
+    conf.set(TEST_CONFIG_PARAM, TEST_CONFIG_VALUE);
+    frobComparator = WritableComparator.get(Frob.class, conf);
+    assert(frobComparator instanceof FrobComparator);
+    assertNotNull(frobComparator.getConf());
+    assertEquals(frobComparator.getConf().get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);
+
+    // Without conf. should reuse configuration.
+    frobComparator = WritableComparator.get(Frob.class);
+    assert(frobComparator instanceof FrobComparator);
+    assertNotNull(frobComparator.getConf());
+    assertEquals(frobComparator.getConf().get(TEST_CONFIG_PARAM), TEST_CONFIG_VALUE);
+
+    // New conf. should use new configuration.
+    frobComparator = WritableComparator.get(Frob.class, new Configuration());
+    assert(frobComparator instanceof FrobComparator);
+    assertNotNull(frobComparator.getConf());
+    assertNull(frobComparator.getConf().get(TEST_CONFIG_PARAM));
   }
 
   /**
@@ -153,4 +203,17 @@ public class TestWritable extends TestCase {
         .compare(writable1, writable3) == 0);
   }
 
+  /**
+   * Test that Writables are configured by the Comparator.
+   */
+  public void testConfigurableWritableComparator() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(TEST_WRITABLE_CONFIG_PARAM, TEST_WRITABLE_CONFIG_VALUE);
+
+    WritableComparator wc = WritableComparator.get(SimpleWritableComparable.class, conf);
+    SimpleWritableComparable key = ((SimpleWritableComparable)wc.newKey());
+    assertNotNull(wc.getConf());
+    assertNotNull(key.getConf());
+    assertEquals(key.getConf().get(TEST_WRITABLE_CONFIG_PARAM), TEST_WRITABLE_CONFIG_VALUE);
+  }
 }
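
For readers skimming the diff: the user-visible change exercised above is the
new two-argument WritableComparator.get(Class, Configuration) overload, which
makes the returned comparator (and, when the key type is Configurable, the
keys it creates) see the supplied Configuration. A hedged usage sketch;
example.param is a made-up key and IntWritable stands in for any
WritableComparable with a registered comparator.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.io.IntWritable;
  import org.apache.hadoop.io.WritableComparator;

  public class ConfiguredComparatorExample {
    public static void main(String[] args) {
      Configuration conf = new Configuration();
      conf.set("example.param", "value");  // hypothetical key
      // Same overload as in testGetComparator above.
      WritableComparator cmp = WritableComparator.get(IntWritable.class, conf);
      System.out.println(cmp.getConf().get("example.param"));  // prints value
    }
  }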

+ 14 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestUserGroupInformation.java

@@ -20,6 +20,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.token.Token;
@@ -31,6 +32,7 @@ import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.LoginContext;
+
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.InputStreamReader;
@@ -151,6 +153,18 @@ public class TestUserGroupInformation {
     assertEquals(AuthenticationMethod.PROXY, ugi.getAuthenticationMethod());
     assertEquals(AuthenticationMethod.SIMPLE, ugi.getRealAuthenticationMethod());
   }
+  
+  @Test (timeout = 30000)
+  public void testCreateRemoteUser() {
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser("user1");
+    assertEquals(AuthenticationMethod.SIMPLE, ugi.getAuthenticationMethod());
+    assertTrue (ugi.toString().contains("(auth:SIMPLE)"));
+    ugi = UserGroupInformation.createRemoteUser("user1", 
+        AuthMethod.KERBEROS);
+    assertEquals(AuthenticationMethod.KERBEROS, ugi.getAuthenticationMethod());
+    assertTrue (ugi.toString().contains("(auth:KERBEROS)"));
+  }
+  
   /** Test login method */
   @Test (timeout = 30000)
   public void testLogin() throws Exception {
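
The new overload tested above lets RPC-layer code record how a remote user
actually authenticated instead of defaulting to SIMPLE. A short usage sketch
based only on the calls shown in the test:

  import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
  import org.apache.hadoop.security.UserGroupInformation;

  public class RemoteUserAuthExample {
    public static void main(String[] args) {
      // One-argument form tags the UGI with SIMPLE authentication ...
      UserGroupInformation simple =
          UserGroupInformation.createRemoteUser("user1");
      // ... while the two-argument form records the real auth method.
      UserGroupInformation kerberos =
          UserGroupInformation.createRemoteUser("user1", AuthMethod.KERBEROS);
      System.out.println(simple);    // toString ends with (auth:SIMPLE)
      System.out.println(kerberos);  // toString ends with (auth:KERBEROS)
    }
  }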

+ 221 - 125
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -54,47 +54,55 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-ls \[-d\] \[-h\] \[-R\] \[&lt;path&gt; \.\.\.\]:( |\t)*List the contents that match the specified file pattern. If( )*</expected-output>
+          <expected-output>^-ls \[-d\] \[-h\] \[-R\] \[&lt;path&gt; \.\.\.\] :( |\t)*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*path is not specified, the contents of /user/&lt;currentUser&gt;( )*</expected-output>
+          <expected-output>^\s*List the contents that match the specified file pattern. If path is not</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*path is not specified, the contents of /user/&lt;currentUser&gt;( )*</expected-output>
+          <expected-output>^\s*specified, the contents of /user/&lt;currentUser&gt; will be listed. Directory entries( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*will be listed. Directory entries are of the form( )*</expected-output>
+          <expected-output>^\s*are of the form:( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*permissions - userid groupid size_of_directory\(in bytes\) modification_date\(yyyy-MM-dd HH:mm\) directoryName( )*</expected-output>
+          <expected-output>^\s*permissions - userId groupId sizeOfDirectory\(in bytes\)( )*</expected-output>
         </comparator>
-         <comparator>
+        <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*and file entries are of the form( )*</expected-output>
+          <expected-output>^\s*modificationDate\(yyyy-MM-dd HH:mm\) directoryName( )*</expected-output>
         </comparator>
-          <comparator>
+        <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*permissions number_of_replicas userid groupid size_of_file\(in bytes\) modification_date\(yyyy-MM-dd HH:mm\) fileName( )*</expected-output>
+          <expected-output>^\s*and file entries are of the form:( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-d\s+Directories are listed as plain files\.</expected-output>
+          <expected-output>^\s*permissions numberOfReplicas userId groupId sizeOfFile\(in bytes\)( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-h\s+Formats the sizes of files in a human-readable fashion( )*</expected-output>
+          <expected-output>^\s*modificationDate\(yyyy-MM-dd HH:mm\) fileName( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*rather than a number of bytes\.( )*</expected-output>
+          <expected-output>^\s*-d\s+Directories are listed as plain files\.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-R\s+Recursively list the contents of directories\.</expected-output>
+          <expected-output>^\s*-h\s+Formats the sizes of files in a human-readable fashion rather than a number( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*of bytes\.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-R\s+Recursively list the contents of directories\.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -109,7 +117,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-lsr:\s+\(DEPRECATED\) Same as 'ls -R'</expected-output>
+          <expected-output>^-lsr :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s+\(DEPRECATED\) Same as 'ls -R'</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -125,23 +137,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-get( )*\[-p\]( )*\[-ignoreCrc\]( )*\[-crc\]( )*&lt;src&gt; \.\.\. &lt;localdst&gt;:( |\t)*Copy files that match the file pattern &lt;src&gt;( )*</expected-output>
+          <expected-output>^-get( )*\[-p\]( )*\[-ignoreCrc\]( )*\[-crc\]( )*&lt;src&gt; \.\.\. &lt;localdst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*to the local name.( )*&lt;src&gt; is kept.( )*When copying multiple,( )*</expected-output>
+          <expected-output>\s*Copy files that match the file pattern &lt;src&gt; to the local name.  &lt;src&gt; is kept.\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*files, the destination must be a directory.( )*Passing( )*</expected-output>
+          <expected-output>\s*When copying multiple files, the destination must be a directory. Passing -p\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-p preserves access and modification times,( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*ownership and the mode.( )*</expected-output>
+          <expected-output>^( |\t)*preserves access and modification times, ownership and the mode.*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -156,35 +164,39 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-du \[-s\] \[-h\] &lt;path&gt; \.\.\.:\s+Show the amount of space, in bytes, used by the files that\s*</expected-output>
+          <expected-output>^-du \[-s\] \[-h\] &lt;path&gt; \.\.\. :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Show the amount of space, in bytes, used by the files that match the specified\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*match the specified file pattern. The following flags are optional:</expected-output>
+          <expected-output>^\s*file pattern. The following flags are optional:\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*-s\s*Rather than showing the size of each individual file that</expected-output>
+          <expected-output>^\s*-s\s*Rather than showing the size of each individual file that matches the\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*matches the pattern, shows the total \(summary\) size.</expected-output>
+          <expected-output>^\s*pattern, shows the total \(summary\) size.\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*-h\s*Formats the sizes of files in a human-readable fashion</expected-output>
+          <expected-output>^\s*-h\s*Formats the sizes of files in a human-readable fashion rather than a number\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>\s*rather than a number of bytes.</expected-output>
+          <expected-output>\s*of bytes.\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*Note that, even without the -s option, this only shows size summaries</expected-output>
+          <expected-output>^\s*Note that, even without the -s option, this only shows size summaries one level\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s*one level deep into a directory.</expected-output>
+          <expected-output>^\s*deep into a directory.</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -207,7 +219,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-dus:\s+\(DEPRECATED\) Same as 'du -s'</expected-output>
+          <expected-output>^-dus :</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*\(DEPRECATED\) Same as 'du -s'</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -222,7 +238,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-count \[-q\] &lt;path&gt; \.\.\.:( |\t)*Count the number of directories, files and bytes under the paths( )*</expected-output>
+          <expected-output>^-count \[-q\] &lt;path&gt; \.\.\. :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Count the number of directories, files and bytes under the paths( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
@@ -253,15 +273,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-mv &lt;src&gt; \.\.\. &lt;dst&gt;:( |\t)*Move files that match the specified file pattern &lt;src&gt;( )*</expected-output>
+          <expected-output>^-mv &lt;src&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*to a destination &lt;dst&gt;.  When moving multiple files, the( )*</expected-output>
+          <expected-output>\s*Move files that match the specified file pattern &lt;src&gt; to a destination &lt;dst&gt;.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*destination must be a directory.( )*</expected-output>
+          <expected-output>^( |\t)*When moving multiple files, the destination must be a directory.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -276,23 +296,31 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-cp \[-f\] \[-p\] &lt;src&gt; \.\.\. &lt;dst&gt;:( |\t)*Copy files that match the file pattern &lt;src&gt; to a( )*</expected-output>
+          <expected-output>^-cp \[-f\] \[-p \| -p\[topax\]\] &lt;src&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Copy files that match the file pattern &lt;src&gt; to a destination.  When copying( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*destination.  When copying multiple files, the destination( )*</expected-output>
+          <expected-output>^( |\t)*multiple files, the destination must be a directory.( )*Passing -p preserves status( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*must be a directory.( )*Passing -p preserves access and( )*</expected-output>
+          <expected-output>^( |\t)*\[topax\] \(timestamps, ownership, permission, ACLs, XAttr\). If -p is specified( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*modification times, ownership and the mode. Passing -f( )*</expected-output>
+          <expected-output>^( |\t)*with no &lt;arg&gt;, then preserves timestamps, ownership, permission. If -pa is( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*overwrites the destination if it already exists.( )*</expected-output>
+          <expected-output>^( |\t)*specified, then preserves permission also because ACL is a super-set of( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*permission. Passing -f overwrites the destination if it already exists.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -307,31 +335,31 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rm \[-f\] \[-r\|-R\] \[-skipTrash\] &lt;src&gt; \.\.\.:( |\t)*Delete all files that match the specified file pattern.( )*</expected-output>
+          <expected-output>^-rm \[-f\] \[-r\|-R\] \[-skipTrash\] &lt;src&gt; \.\.\. :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*Equivalent to the Unix command "rm &lt;src&gt;"( )*</expected-output>
+          <expected-output>^\s*Delete all files that match the specified file pattern. Equivalent to the Unix( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-skipTrash option bypasses trash, if enabled, and immediately( )*</expected-output>
+          <expected-output>^\s*command "rm &lt;src&gt;"( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*deletes &lt;src&gt;( )*</expected-output>
+          <expected-output>^\s*-skipTrash\s+option bypasses trash, if enabled, and immediately deletes &lt;src&gt;( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s+-f\s+If the file does not exist, do not display a diagnostic</expected-output>
+          <expected-output>^\s+-f\s+If the file does not exist, do not display a diagnostic message or\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s+message or modify the exit status to reflect an error\.</expected-output>
+          <expected-output>^\s+modify the exit status to reflect an error\.\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^\s+-\[rR\]\s+Recursively deletes directories</expected-output>
+          <expected-output>^\s+-\[rR\]\s+Recursively deletes directories\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -346,11 +374,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rmdir \[--ignore-fail-on-non-empty\] &lt;dir&gt; \.\.\.:\s+Removes the directory entry specified by each directory argument,</expected-output>
+          <expected-output>^-rmdir \[--ignore-fail-on-non-empty\] &lt;dir&gt; \.\.\. :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>\s+provided it is empty.</expected-output>
+          <expected-output>\s+Removes the directory entry specified by each directory argument, provided it is\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>\s+empty\.\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -365,7 +397,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-rmr:\s+\(DEPRECATED\) Same as 'rm -r'</expected-output>
+          <expected-output>^-rmr :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*\(DEPRECATED\) Same as 'rm -r'\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -380,27 +416,23 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-put \[-f\] \[-p\] &lt;localsrc&gt; \.\.\. &lt;dst&gt;:\s+Copy files from the local file system</expected-output>
+          <expected-output>^-put \[-f\] \[-p\] &lt;localsrc&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*into fs.( )*Copying fails if the file already( )*</expected-output>
+          <expected-output>^\s*Copy files from the local file system into fs.( )*Copying fails if the file already( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*exists, unless the -f flag is given.( )*Passing( )*</expected-output>
+          <expected-output>^\s*exists, unless the -f flag is given.( )*Passing -p preserves access and( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-p preserves access and modification times,( )*</expected-output>
+          <expected-output>^\s*modification times, ownership and the mode. Passing -f overwrites the( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*ownership and the mode. Passing -f overwrites( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*the destination if it already exists.( )*</expected-output>
+          <expected-output>^( |\t)*destination if it already exists.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -415,7 +447,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-copyFromLocal \[-f\] \[-p\] &lt;localsrc&gt; \.\.\. &lt;dst&gt;:\s+Identical to the -put command\.</expected-output>
+          <expected-output>^-copyFromLocal \[-f\] \[-p\] &lt;localsrc&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Identical to the -put command\.\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -430,11 +466,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-moveFromLocal &lt;localsrc&gt; \.\.\. &lt;dst&gt;:\s+Same as -put, except that the source is</expected-output>
+          <expected-output>^-moveFromLocal &lt;localsrc&gt; \.\.\. &lt;dst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*deleted after it's copied.</expected-output>
+          <expected-output>^( |\t)*Same as -put, except that the source is deleted after it's copied.</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -450,23 +486,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-get( )*\[-p\]( )*\[-ignoreCrc\]( )*\[-crc\]( )*&lt;src&gt; \.\.\. &lt;localdst&gt;:( |\t)*Copy files that match the file pattern &lt;src&gt;( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*to the local name.( )*&lt;src&gt; is kept.( )*When copying multiple,( )*</expected-output>
+          <expected-output>^-get( )*\[-p\]( )*\[-ignoreCrc\]( )*\[-crc\]( )*&lt;src&gt; \.\.\. &lt;localdst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*files, the destination must be a directory.( )*Passing( )*</expected-output>
+          <expected-output>^( |\t)*Copy files that match the file pattern &lt;src&gt; to the local name.( )*&lt;src&gt; is kept.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-p preserves access and modification times,( )*</expected-output>
+          <expected-output>^( |\t)*When copying multiple files, the destination must be a directory. Passing -p( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*ownership and the mode.( )*</expected-output>
+          <expected-output>^( |\t)*preserves access and modification times, ownership and the mode.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -481,19 +513,19 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-getmerge \[-nl\] &lt;src&gt; &lt;localdst&gt;:( |\t)*Get all the files in the directories that( )*</expected-output>
+          <expected-output>^-getmerge \[-nl\] &lt;src&gt; &lt;localdst&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*match the source file pattern and merge and sort them to only( )*</expected-output>
+          <expected-output>^( |\t)*Get all the files in the directories that match the source file pattern and( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*one file on local fs. &lt;src&gt; is kept.( )*</expected-output>
+          <expected-output>^( |\t)*merge and sort them to only one file on local fs. &lt;src&gt; is kept.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-nl   Add a newline character at the end of each file.( )*</expected-output>
+          <expected-output>^( |\t)*-nl\s+Add a newline character at the end of each file.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -509,11 +541,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-cat \[-ignoreCrc\] &lt;src&gt; \.\.\.:( |\t)*Fetch all files that match the file pattern &lt;src&gt;( )*</expected-output>
+          <expected-output>^-cat \[-ignoreCrc\] &lt;src&gt; \.\.\. :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Fetch all files that match the file pattern &lt;src&gt; and display their content on\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*and display their content on stdout.</expected-output>
+          <expected-output>^\s*stdout.</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -529,7 +565,27 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-checksum &lt;src&gt; \.\.\.:( |\t)*Dump checksum information for files.*</expected-output>
+          <expected-output>^-checksum &lt;src&gt; \.\.\. :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Dump checksum information for files that match the file pattern &lt;src&gt; to stdout\.\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Note that this requires a round-trip to a datanode storing each block of the\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*file, and thus is not efficient to run on a large number of files\. The checksum\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*of a file depends on its content, block size and the checksum algorithm and\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*parameters used for creating the file\.\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -544,7 +600,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-copyToLocal \[-p\] \[-ignoreCrc\] \[-crc\] &lt;src&gt; \.\.\. &lt;localdst&gt;:\s+Identical to the -get command.</expected-output>
+          <expected-output>^-copyToLocal \[-p\] \[-ignoreCrc\] \[-crc\] &lt;src&gt; \.\.\. &lt;localdst&gt; :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Identical to the -get command.\s*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -559,7 +619,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-moveToLocal &lt;src&gt; &lt;localdst&gt;:\s+Not implemented yet</expected-output>
+          <expected-output>^-moveToLocal &lt;src&gt; &lt;localdst&gt; :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Not implemented yet</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -574,7 +638,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-mkdir \[-p\] &lt;path&gt; \.\.\.:( |\t)*Create a directory in specified location.( )*</expected-output>
+          <expected-output>^-mkdir \[-p\] &lt;path&gt; \.\.\. :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Create a directory in specified location.( )*</expected-output>
         </comparator>
         <comparator>
           <type>TokenComparator</type>
@@ -593,27 +661,31 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-setrep \[-R\] \[-w\] &lt;rep&gt; &lt;path&gt; \.\.\.:( |\t)*Set the replication level of a file. If &lt;path&gt; is a directory( )*</expected-output>
+          <expected-output>^-setrep \[-R\] \[-w\] &lt;rep&gt; &lt;path&gt; \.\.\. :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Set the replication level of a file. If &lt;path&gt; is a directory then the command( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*then the command recursively changes the replication factor of( )*</expected-output>
+          <expected-output>^\s*recursively changes the replication factor of all files under the directory tree( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*all files under the directory tree rooted at &lt;path&gt;\.( )*</expected-output>
+          <expected-output>^\s*rooted at &lt;path&gt;\.( )*</expected-output>
         </comparator>
         <comparator>
             <type>RegexpComparator</type>
-            <expected-output>^( |\t)*The -w flag requests that the command wait for the replication( )*</expected-output>
+            <expected-output>^\s*-w\s+It requests that the command waits for the replication to complete\. This( )*</expected-output>
         </comparator>
         <comparator>
             <type>RegexpComparator</type>
-            <expected-output>^( |\t)*to complete. This can potentially take a very long time\.( )*</expected-output>
+            <expected-output>^( |\t)*can potentially take a very long time\.( )*</expected-output>
         </comparator>
           <comparator>
               <type>RegexpComparator</type>
-              <expected-output>^( |\t)*The -R flag is accepted for backwards compatibility\. It has no effect\.( )*</expected-output>
+              <expected-output>^( |\t)*-R\s+It is accepted for backwards compatibility\. It has no effect\.( )*</expected-output>
           </comparator>
       </comparators>
     </test>
@@ -628,15 +700,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-touchz &lt;path&gt; \.\.\.:( |\t)*Creates a file of zero length( )*</expected-output>
+          <expected-output>^-touchz &lt;path&gt; \.\.\. :( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*at &lt;path&gt; with current time as the timestamp of that &lt;path&gt;.( )*</expected-output>
+          <expected-output>^( |\t)*Creates a file of zero length at &lt;path&gt; with current time as the timestamp of( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*An error is returned if the file exists with non-zero length( )*</expected-output>
+          <expected-output>^( |\t)* that &lt;path&gt;\. An error is returned if the file exists with non-zero length( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -651,11 +723,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-test -\[defsz\] &lt;path&gt;:\sAnswer various questions about &lt;path&gt;, with result via exit status.</expected-output>
+          <expected-output>^-test -\[defsz\] &lt;path&gt; :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*else, return 1.( )*</expected-output>
+          <expected-output>^\s*Answer various questions about &lt;path&gt;, with result via exit status.</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*-[defsz]\s+return 0 if .*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -670,15 +746,23 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-stat \[format\] &lt;path&gt; \.\.\.:( |\t)*Print statistics about the file/directory at &lt;path&gt;( )*</expected-output>
+          <expected-output>^-stat \[format\] &lt;path&gt; \.\.\. :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*Print statistics about the file/directory at &lt;path&gt; in the specified format.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*in the specified format. Format accepts filesize in blocks \(%b\), group name of owner\(%g\),( )*</expected-output>
+          <expected-output>^( |\t)*Format accepts filesize in blocks \(%b\), group name of owner\(%g\), filename \(%n\),( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*filename \(%n\), block size \(%o\), replication \(%r\), user name of owner\(%u\), modification date \(%y, %Y\)( )*</expected-output>
+          <expected-output>^( |\t)*block size \(%o\), replication \(%r\), user name of owner\(%u\), modification date( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*\(%y, %Y\)( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -693,11 +777,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-tail \[-f\] &lt;file&gt;:( |\t)+Show the last 1KB of the file.( )*</expected-output>
+          <expected-output>^-tail \[-f\] &lt;file&gt; :\s*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^\s*Show the last 1KB of the file.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*The -f option shows appended data as the file grows.( )*</expected-output>
+          <expected-output>^( |\t)*-f\s+Shows appended data as the file grows.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -712,47 +800,55 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-chmod \[-R\] &lt;MODE\[,MODE\]... \| OCTALMODE&gt; PATH...:( |\t)*Changes permissions of a file.( )*</expected-output>
+          <expected-output>^-chmod \[-R\] &lt;MODE\[,MODE\]... \| OCTALMODE&gt; PATH... :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*This works similar to shell's chmod with a few exceptions.( )*</expected-output>
+          <expected-output>^( |\t)*Changes permissions of a file. This works similar to the shell's chmod command( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-R( |\t)*modifies the files recursively. This is the only option( )*</expected-output>
+          <expected-output>^( |\t)*with a few exceptions.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*currently supported.( )*</expected-output>
+          <expected-output>^( |\t)*-R\s*modifies the files recursively. This is the only option currently( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*MODE( |\t)*Mode is same as mode used for chmod shell command.( )*</expected-output>
+          <expected-output>^( |\t)*supported.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*Only letters recognized are 'rwxXt'. E.g. \+t,a\+r,g-w,\+rwx,o=r( )*</expected-output>
+          <expected-output>^( |\t)*&lt;MODE&gt;\s*Mode is the same as mode used for the shell's command. The only( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*OCTALMODE Mode specifed in 3 or 4 digits. If 4 digits, the first may( )*</expected-output>
+          <expected-output>^( |\t)*letters recognized are 'rwxXt', e\.g\. \+t,a\+r,g-w,\+rwx,o=r\.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*be 1 or 0 to turn the sticky bit on or off, respectively.( )*Unlike( |\t)*shell command, it is not possible to specify only part of the mode( )*</expected-output>
+          <expected-output>^( |\t)*&lt;OCTALMODE&gt;\s+Mode specifed in 3 or 4 digits. If 4 digits, the first may be 1 or( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*E.g. 754 is same as u=rwx,g=rx,o=r( )*</expected-output>
+          <expected-output>^( |\t)*0 to turn the sticky bit on or off, respectively.( )*Unlike( |\t)*the( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*If none of 'augo' is specified, 'a' is assumed and unlike( )*</expected-output>
+          <expected-output>^( |\t)*shell command, it is not possible to specify only part of the( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*shell command, no umask is applied.( )*</expected-output>
+          <expected-output>^( |\t)*mode, e\.g\. 754 is same as u=rwx,g=rx,o=r\.( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*If none of 'augo' is specified, 'a' is assumed and unlike the shell command, no( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*umask is applied.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -767,51 +863,47 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-chown \[-R\] \[OWNER\]\[:\[GROUP\]\] PATH...:( |\t)*Changes owner and group of a file.( )*</expected-output>
+          <expected-output>^-chown \[-R\] \[OWNER\]\[:\[GROUP\]\] PATH... :\s*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*This is similar to shell's chown with a few exceptions.( )*</expected-output>
+          <expected-output>^\s*Changes owner and group of a file\. This is similar to the shell's chown command( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*-R( |\t)*modifies the files recursively. This is the only option( )*</expected-output>
+          <expected-output>^( |\t)*with a few exceptions.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*currently supported.( )*</expected-output>
+          <expected-output>^( |\t)*-R( |\t)*modifies the files recursively. This is the only option currently( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*If only owner or group is specified then only owner or( )*</expected-output>
+          <expected-output>^( |\t)*supported.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*group is modified.( )*</expected-output>
+          <expected-output>^( |\t)*If only the owner or group is specified, then only the owner or group is( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*The owner and group names may only consist of digits, alphabet,( )*</expected-output>
+          <expected-output>^( |\t)*modified. The owner and group names may only consist of digits, alphabet, and( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*and any of .+?. The names are case sensitive.( )*</expected-output>
+          <expected-output>^( |\t)*any of .+?. The names are case sensitive.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*WARNING: Avoid using '.' to separate user name and group though( )*</expected-output>
+          <expected-output>^( |\t)*WARNING: Avoid using '.' to separate user name and group though Linux allows it.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*Linux allows it. If user names have dots in them and you are( )*</expected-output>
+          <expected-output>^( |\t)*If user names have dots in them and you are using local file system, you might( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*using local file system, you might see surprising results since( )*</expected-output>
-        </comparator>
-        <comparator>
-          <type>RegexpComparator</type>
-          <expected-output>^( |\t)*shell command 'chown' is used for local files.( )*</expected-output>
+          <expected-output>^( |\t)*see surprising results since the shell command 'chown' is used for local files.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -826,7 +918,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-chgrp \[-R\] GROUP PATH...:( |\t)*This is equivalent to -chown ... :GROUP ...( )*</expected-output>
+          <expected-output>^-chgrp \[-R\] GROUP PATH... :( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*This is equivalent to -chown ... :GROUP ...( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -841,11 +937,11 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-help \[cmd ...\]:( |\t)*Displays help for given command or all commands if none( )*</expected-output>
+          <expected-output>^-help \[cmd ...\] :( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*is specified.( )*</expected-output>
+          <expected-output>^( |\t)*Displays help for given command or all commands if none is specified.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
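
These hunks split each one-line help regex into a comparator for the usage line (which now ends in " :") plus one comparator per wrapped description line. For orientation, a RegexpComparator check amounts to ordinary java.util.regex matching against a single line of the command's output; the helper below is a hedged sketch of that idea, not the actual TestCLI implementation.

import java.util.regex.Pattern;

public class RegexpCheckSketch {
  // Sketch only: the real RegexpComparator may differ in details.
  static boolean matches(String expectedRegex, String actualLine) {
    return Pattern.compile(expectedRegex).matcher(actualLine).find();
  }

  public static void main(String[] args) {
    // Usage line and description line are now verified separately.
    System.out.println(matches("^-tail \\[-f\\] <file> :\\s*",
        "-tail [-f] <file> :"));                                   // true
    System.out.println(matches("^\\s*Show the last 1KB of the file.( )*",
        "  Show the last 1KB of the file."));                      // true
  }
}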

+ 128 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java

@@ -31,6 +31,8 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PositionedReadable;
 import org.apache.hadoop.fs.Seekable;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.net.NetUtils;
@@ -86,6 +88,7 @@ public class HttpFSFileSystem extends FileSystem
   public static final String REPLICATION_PARAM = "replication";
   public static final String BLOCKSIZE_PARAM = "blocksize";
   public static final String PERMISSION_PARAM = "permission";
+  public static final String ACLSPEC_PARAM = "aclspec";
   public static final String DESTINATION_PARAM = "destination";
   public static final String RECURSIVE_PARAM = "recursive";
   public static final String SOURCES_PARAM = "sources";
@@ -95,6 +98,7 @@ public class HttpFSFileSystem extends FileSystem
   public static final String ACCESS_TIME_PARAM = "accesstime";
 
   public static final Short DEFAULT_PERMISSION = 0755;
+  public static final String ACLSPEC_DEFAULT = "";
 
   public static final String RENAME_JSON = "boolean";
 
@@ -152,6 +156,11 @@ public class HttpFSFileSystem extends FileSystem
   public static final String CONTENT_SUMMARY_SPACE_CONSUMED_JSON = "spaceConsumed";
   public static final String CONTENT_SUMMARY_SPACE_QUOTA_JSON = "spaceQuota";
 
+  public static final String ACL_STATUS_JSON = "AclStatus";
+  public static final String ACL_STICKY_BIT_JSON = "stickyBit";
+  public static final String ACL_ENTRIES_JSON = "entries";
+  public static final String ACL_BIT_JSON = "aclBit";
+
   public static final String ERROR_JSON = "RemoteException";
   public static final String ERROR_EXCEPTION_JSON = "exception";
   public static final String ERROR_CLASSNAME_JSON = "javaClassName";
@@ -169,10 +178,12 @@ public class HttpFSFileSystem extends FileSystem
     OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
     GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
     GETFILECHECKSUM(HTTP_GET),  GETFILEBLOCKLOCATIONS(HTTP_GET),
-    INSTRUMENTATION(HTTP_GET),
+    INSTRUMENTATION(HTTP_GET), GETACLSTATUS(HTTP_GET),
     APPEND(HTTP_POST), CONCAT(HTTP_POST),
     CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
     SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
+    MODIFYACLENTRIES(HTTP_PUT), REMOVEACLENTRIES(HTTP_PUT),
+    REMOVEDEFAULTACL(HTTP_PUT), REMOVEACL(HTTP_PUT), SETACL(HTTP_PUT),
     DELETE(HTTP_DELETE);
 
     private String httpMethod;
@@ -798,6 +809,105 @@ public class HttpFSFileSystem extends FileSystem
     return (Boolean) json.get(SET_REPLICATION_JSON);
   }
 
+  /**
+   * Modify the ACL entries for a file.
+   *
+   * @param path Path to modify
+   * @param aclSpec List<AclEntry> describing modifications
+   * @throws IOException
+   */
+  @Override
+  public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
+          throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.MODIFYACLENTRIES.toString());
+    params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
+    HttpURLConnection conn = getConnection(
+            Operation.MODIFYACLENTRIES.getMethod(), params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  /**
+   * Remove the specified ACL entries from a file
+   * @param path Path to modify
+   * @param aclSpec List<AclEntry> describing entries to remove
+   * @throws IOException
+   */
+  @Override
+  public void removeAclEntries(Path path, List<AclEntry> aclSpec)
+          throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.REMOVEACLENTRIES.toString());
+    params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
+    HttpURLConnection conn = getConnection(
+            Operation.REMOVEACLENTRIES.getMethod(), params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  /**
+   * Removes the default ACL for the given file
+   * @param path Path from which to remove the default ACL.
+   * @throws IOException
+   */
+  @Override
+  public void removeDefaultAcl(Path path) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.REMOVEDEFAULTACL.toString());
+    HttpURLConnection conn = getConnection(
+            Operation.REMOVEDEFAULTACL.getMethod(), params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  /**
+   * Remove all ACLs from a file
+   * @param path Path from which to remove all ACLs
+   * @throws IOException
+   */
+  @Override
+  public void removeAcl(Path path) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.REMOVEACL.toString());
+    HttpURLConnection conn = getConnection(Operation.REMOVEACL.getMethod(),
+            params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  /**
+   * Set the ACLs for the given file
+   * @param path Path to modify
+   * @param aclSpec List<AclEntry> describing modifications, must include
+   *                entries for user, group, and others for compatibility
+   *                with permission bits.
+   * @throws IOException
+   */
+  @Override
+  public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.SETACL.toString());
+    params.put(ACLSPEC_PARAM, AclEntry.aclSpecToString(aclSpec));
+    HttpURLConnection conn = getConnection(Operation.SETACL.getMethod(),
+                                           params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+  }
+
+  /**
+   * Get the ACL information for a given file
+   * @param path Path to acquire ACL info for
+   * @return an AclStatus describing the ACLs for the file
+   * @throws IOException
+   */
+  @Override
+  public AclStatus getAclStatus(Path path) throws IOException {
+    Map<String, String> params = new HashMap<String, String>();
+    params.put(OP_PARAM, Operation.GETACLSTATUS.toString());
+    HttpURLConnection conn = getConnection(Operation.GETACLSTATUS.getMethod(),
+            params, path, true);
+    HttpFSUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
+    JSONObject json = (JSONObject) HttpFSUtils.jsonParse(conn);
+    json = (JSONObject) json.get(ACL_STATUS_JSON);
+    return createAclStatus(json);
+  }
+
   private FileStatus createFileStatus(Path parent, JSONObject json) {
     String pathSuffix = (String) json.get(PATH_SUFFIX_JSON);
     Path path = (pathSuffix.equals("")) ? parent : new Path(parent, pathSuffix);
@@ -830,6 +940,23 @@ public class HttpFSFileSystem extends FileSystem
     return fileStatus;
   }
 
+  /**
+   * Convert the given JSON object into an AclStatus
+   * @param json Input JSON representing the ACLs
+   * @return Resulting AclStatus
+   */
+  private AclStatus createAclStatus(JSONObject json) {
+    AclStatus.Builder aclStatusBuilder = new AclStatus.Builder()
+            .owner((String) json.get(OWNER_JSON))
+            .group((String) json.get(GROUP_JSON))
+            .stickyBit((Boolean) json.get(ACL_STICKY_BIT_JSON));
+    JSONArray entries = (JSONArray) json.get(ACL_ENTRIES_JSON);
+    for ( Object e : entries ) {
+      aclStatusBuilder.addEntry(AclEntry.parseAclEntry(e.toString(), true));
+    }
+    return aclStatusBuilder.build();
+  }
+
   @Override
   public ContentSummary getContentSummary(Path f) throws IOException {
     Map<String, String> params = new HashMap<String, String>();
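
Taken together, the methods above give HttpFSFileSystem complete client-side ACL support over the WebHDFS-style REST API. A hedged usage sketch follows; the endpoint URI, path, and aclspec are illustrative assumptions, not taken from this patch.

import java.net.URI;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;

public class HttpFSAclSketch {
  public static void main(String[] args) throws Exception {
    // Assumed HttpFS endpoint and test path; adjust for a real cluster.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://httpfs-host:14000"), new Configuration());
    Path path = new Path("/tmp/acl-demo.txt");

    List<AclEntry> spec = AclEntry.parseAclSpec(
        "user::rwx,user:foo:rw-,group::r--,other::---", true);
    fs.setAcl(path, spec);                     // PUT ?op=SETACL&aclspec=...
    AclStatus status = fs.getAclStatus(path);  // GET ?op=GETACLSTATUS
    System.out.println(status.getEntries());
    fs.removeAcl(path);                        // PUT ?op=REMOVEACL
  }
}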

+ 385 - 55
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java

@@ -26,7 +26,10 @@ import org.apache.hadoop.fs.GlobFilter;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.json.simple.JSONArray;
@@ -36,6 +39,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.LinkedHashMap;
+import java.util.List;
 import java.util.Map;
 
 /**
@@ -44,34 +48,170 @@ import java.util.Map;
 @InterfaceAudience.Private
 public class FSOperations {
 
-  @SuppressWarnings({"unchecked", "deprecation"})
-  private static Map fileStatusToJSONRaw(FileStatus status, boolean emptyPathSuffix) {
-    Map json = new LinkedHashMap();
-    json.put(HttpFSFileSystem.PATH_SUFFIX_JSON, (emptyPathSuffix) ? "" : status.getPath().getName());
-    json.put(HttpFSFileSystem.TYPE_JSON, HttpFSFileSystem.FILE_TYPE.getType(status).toString());
-    json.put(HttpFSFileSystem.LENGTH_JSON, status.getLen());
-    json.put(HttpFSFileSystem.OWNER_JSON, status.getOwner());
-    json.put(HttpFSFileSystem.GROUP_JSON, status.getGroup());
-    json.put(HttpFSFileSystem.PERMISSION_JSON, HttpFSFileSystem.permissionToString(status.getPermission()));
-    json.put(HttpFSFileSystem.ACCESS_TIME_JSON, status.getAccessTime());
-    json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON, status.getModificationTime());
-    json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, status.getBlockSize());
-    json.put(HttpFSFileSystem.REPLICATION_JSON, status.getReplication());
-    return json;
+  /**
+   * This class is used to group a FileStatus and an AclStatus together.
+   * It's needed for the GETFILESTATUS and LISTSTATUS calls, which take
+   * most info from the FileStatus and a wee bit from the AclStatus.
+   */
+  private static class StatusPair {
+    private FileStatus fileStatus;
+    private AclStatus aclStatus;
+
+    /**
+     * Simple constructor
+     * @param fileStatus Existing FileStatus object
+     * @param aclStatus Existing AclStatus object
+     */
+    public StatusPair(FileStatus fileStatus, AclStatus aclStatus) {
+      this.fileStatus = fileStatus;
+      this.aclStatus = aclStatus;
+    }
+
+    /**
+     * Create one StatusPair by performing the underlying calls to
+     * fs.getFileStatus and fs.getAclStatus
+     * @param fs The FileSystem where 'path' lives
+     * @param path The file/directory to query
+     * @throws IOException
+     */
+    public StatusPair(FileSystem fs, Path path) throws IOException {
+      fileStatus = fs.getFileStatus(path);
+      aclStatus = null;
+      try {
+        aclStatus = fs.getAclStatus(path);
+      } catch (AclException e) {
+        /*
+         * The cause is almost certainly an "ACLS aren't enabled"
+         * exception, so leave aclStatus at null and carry on.
+         */
+      } catch (UnsupportedOperationException e) {
+        /* Ditto above - this is the case for a local file system */
+      }
+    }
+
+    /**
+     * Return a Map suitable for conversion into JSON format
+     * @return The JSONish Map
+     */
+    public Map<String,Object> toJson() {
+      Map<String,Object> json = new LinkedHashMap<String,Object>();
+      json.put(HttpFSFileSystem.FILE_STATUS_JSON, toJsonInner(true));
+      return json;
+    }
+
+    /**
+     * Return the inner part of the JSON for the status - used by both the
+     * GETFILESTATUS and LISTSTATUS calls.
+     * @param emptyPathSuffix Whether or not to include PATH_SUFFIX_JSON
+     * @return The JSONish Map
+     */
+    public Map<String,Object> toJsonInner(boolean emptyPathSuffix) {
+      Map<String,Object> json = new LinkedHashMap<String,Object>();
+      json.put(HttpFSFileSystem.PATH_SUFFIX_JSON,
+              (emptyPathSuffix) ? "" : fileStatus.getPath().getName());
+      json.put(HttpFSFileSystem.TYPE_JSON,
+              HttpFSFileSystem.FILE_TYPE.getType(fileStatus).toString());
+      json.put(HttpFSFileSystem.LENGTH_JSON, fileStatus.getLen());
+      json.put(HttpFSFileSystem.OWNER_JSON, fileStatus.getOwner());
+      json.put(HttpFSFileSystem.GROUP_JSON, fileStatus.getGroup());
+      json.put(HttpFSFileSystem.PERMISSION_JSON,
+              HttpFSFileSystem.permissionToString(fileStatus.getPermission()));
+      json.put(HttpFSFileSystem.ACCESS_TIME_JSON, fileStatus.getAccessTime());
+      json.put(HttpFSFileSystem.MODIFICATION_TIME_JSON,
+              fileStatus.getModificationTime());
+      json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
+      json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
+      if ( (aclStatus != null) && !(aclStatus.getEntries().isEmpty()) ) {
+        json.put(HttpFSFileSystem.ACL_BIT_JSON,true);
+      }
+      return json;
+    }
   }
 
   /**
-   * Converts a FileSystemAccess <code>FileStatus</code> object into a JSON
-   * object.
+   * Simple class used to contain and operate upon a list of StatusPair
+   * objects.  Used by LISTSTATUS.
+   */
+  private static class StatusPairs {
+    private StatusPair[] statusPairs;
+
+    /**
+     * Construct a list of StatusPair objects
+     * @param fs The FileSystem where 'path' lives
+     * @param path The directory to query
+     * @param filter A possible filter for entries in the directory
+     * @throws IOException
+     */
+    public StatusPairs(FileSystem fs, Path path, PathFilter filter)
+            throws IOException {
+      /* Grab all the file statuses at once in an array */
+      FileStatus[] fileStatuses = fs.listStatus(path, filter);
+
+      /* We'll have an array of StatusPairs of the same length */
+      AclStatus aclStatus = null;
+      statusPairs = new StatusPair[fileStatuses.length];
+
+      /*
+       * For each FileStatus, attempt to acquire an AclStatus.  If the
+       * getAclStatus throws an exception, we assume that ACLs are turned
+       * off entirely and abandon the attempt.
+       */
+      boolean useAcls = true;   // Assume ACLs work until proven otherwise
+      for (int i = 0; i < fileStatuses.length; i++) {
+        if (useAcls) {
+          try {
+            aclStatus = fs.getAclStatus(fileStatuses[i].getPath());
+          } catch (AclException e) {
+            /* Almost certainly due to an "ACLs not enabled" exception */
+            aclStatus = null;
+            useAcls = false;
+          } catch (UnsupportedOperationException e) {
+            /* Ditto above - this is the case for a local file system */
+            aclStatus = null;
+            useAcls = false;
+          }
+        }
+        statusPairs[i] = new StatusPair(fileStatuses[i], aclStatus);
+      }
+    }
+
+    /**
+     * Return a Map suitable for conversion into JSON.
+     * @return A JSONish Map
+     */
+    @SuppressWarnings({"unchecked"})
+    public Map<String,Object> toJson() {
+      Map<String,Object> json = new LinkedHashMap<String,Object>();
+      Map<String,Object> inner = new LinkedHashMap<String,Object>();
+      JSONArray statuses = new JSONArray();
+      for (StatusPair s : statusPairs) {
+        statuses.add(s.toJsonInner(false));
+      }
+      inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
+      json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
+      return json;
+    }
+  }
+
+  /** Converts an <code>AclStatus</code> object into a JSON object.
    *
-   * @param status FileSystemAccess file status.
+   * @param aclStatus AclStatus object
    *
-   * @return The JSON representation of the file status.
+   * @return The JSON representation of the ACLs for the file
    */
-  @SuppressWarnings({"unchecked", "deprecation"})
-  private static Map fileStatusToJSON(FileStatus status) {
-    Map json = new LinkedHashMap();
-    json.put(HttpFSFileSystem.FILE_STATUS_JSON, fileStatusToJSONRaw(status, true));
+  @SuppressWarnings({"unchecked"})
+  private static Map<String,Object> aclStatusToJSON(AclStatus aclStatus) {
+    Map<String,Object> json = new LinkedHashMap<String,Object>();
+    Map<String,Object> inner = new LinkedHashMap<String,Object>();
+    JSONArray entriesArray = new JSONArray();
+    inner.put(HttpFSFileSystem.OWNER_JSON, aclStatus.getOwner());
+    inner.put(HttpFSFileSystem.GROUP_JSON, aclStatus.getGroup());
+    inner.put(HttpFSFileSystem.ACL_STICKY_BIT_JSON, aclStatus.isStickyBit());
+    for ( AclEntry e : aclStatus.getEntries() ) {
+      entriesArray.add(e.toString());
+    }
+    inner.put(HttpFSFileSystem.ACL_ENTRIES_JSON, entriesArray);
+    json.put(HttpFSFileSystem.ACL_STATUS_JSON, inner);
     return json;
   }
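
For reference, the nested map built by aclStatusToJSON serializes into the WebHDFS-style AclStatus document. A short sketch, assuming an AclStatus obtained elsewhere; the owner, group, and entry values are invented.

// Sketch only; relies on the private helper above and json-simple,
// which this file already uses.
Map<String, Object> json = aclStatusToJSON(status);
System.out.println(org.json.simple.JSONValue.toJSONString(json));
// Prints roughly:
// {"AclStatus":{"owner":"alice","group":"supergroup","stickyBit":false,
//               "entries":["user:foo:rw-","group::r--"]}}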
 
@@ -117,30 +257,6 @@ public class FSOperations {
     return response;
   }
 
-  /**
-   * Converts a FileSystemAccess <code>FileStatus</code> array into a JSON array
-   * object.
-   *
-   * @param status FileSystemAccess file status array.
-   * <code>SCHEME://HOST:PORT</code> in the file status.
-   *
-   * @return The JSON representation of the file status array.
-   */
-  @SuppressWarnings("unchecked")
-  private static Map fileStatusToJSON(FileStatus[] status) {
-    JSONArray json = new JSONArray();
-    if (status != null) {
-      for (FileStatus s : status) {
-        json.add(fileStatusToJSONRaw(s, false));
-      }
-    }
-    Map response = new LinkedHashMap();
-    Map temp = new LinkedHashMap();
-    temp.put(HttpFSFileSystem.FILE_STATUS_JSON, json);
-    response.put(HttpFSFileSystem.FILE_STATUSES_JSON, temp);
-    return response;
-  }
-
   /**
   * Converts an object into a JSON Map with one key-value entry.
    * <p/>
@@ -418,18 +534,19 @@ public class FSOperations {
     }
 
     /**
-     * Executes the filesystem operation.
+     * Executes the filesystem getFileStatus operation and returns the
+     * result in a JSONish Map.
      *
      * @param fs filesystem instance to use.
      *
      * @return a Map object (JSON friendly) with the file status.
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Map execute(FileSystem fs) throws IOException {
-      FileStatus status = fs.getFileStatus(path);
-      return fileStatusToJSON(status);
+      StatusPair sp = new StatusPair(fs, path);
+      return sp.toJson();
     }
 
   }
@@ -482,19 +599,20 @@ public class FSOperations {
     }
 
     /**
-     * Executes the filesystem operation.
+     * Returns data for a JSON Map containing the information for
+     * the set of files in 'path' that match 'filter'.
      *
      * @param fs filesystem instance to use.
      *
      * @return a Map with the file status of the directory
-     *         contents.
+     *         contents that match the filter
      *
-     * @throws IOException thrown if an IO error occured.
+     * @throws IOException thrown if an IO error occurred.
      */
     @Override
     public Map execute(FileSystem fs) throws IOException {
-      FileStatus[] status = fs.listStatus(path, filter);
-      return fileStatusToJSON(status);
+      StatusPairs sp = new StatusPairs(fs, path, filter);
+      return sp.toJson();
     }
 
     @Override
@@ -690,6 +808,218 @@ public class FSOperations {
 
   }
 
+  /**
+   * Executor that sets the acl for a file in a FileSystem
+   */
+  @InterfaceAudience.Private
+  public static class FSSetAcl implements FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+    private List<AclEntry> aclEntries;
+
+    /**
+     * Creates a set-acl executor.
+     *
+     * @param path path to set the acl.
+     * @param aclSpec acl to set.
+     */
+    public FSSetAcl(String path, String aclSpec) {
+      this.path = new Path(path);
+      this.aclEntries = AclEntry.parseAclSpec(aclSpec, true);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.setAcl(path, aclEntries);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that removes all acls from a file in a FileSystem
+   */
+  @InterfaceAudience.Private
+  public static class FSRemoveAcl implements FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+
+    /**
+     * Creates a remove-acl executor.
+     *
+     * @param path path from which to remove the acl.
+     */
+    public FSRemoveAcl(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.removeAcl(path);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that modifies acl entries for a file in a FileSystem
+   */
+  @InterfaceAudience.Private
+  public static class FSModifyAclEntries implements FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+    private List<AclEntry> aclEntries;
+
+    /**
+     * Creates a modify-acl executor.
+     *
+     * @param path path to set the acl.
+     * @param aclSpec acl to set.
+     */
+    public FSModifyAclEntries(String path, String aclSpec) {
+      this.path = new Path(path);
+      this.aclEntries = AclEntry.parseAclSpec(aclSpec, true);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.modifyAclEntries(path, aclEntries);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that removes acl entries from a file in a FileSystem
+   */
+  @InterfaceAudience.Private
+  public static class FSRemoveAclEntries implements FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+    private List<AclEntry> aclEntries;
+
+    /**
+     * Creates a remove acl entry executor.
+     *
+     * @param path path from which to remove the acl entries.
+     * @param aclSpec acl parts to remove.
+     */
+    public FSRemoveAclEntries(String path, String aclSpec) {
+      this.path = new Path(path);
+      this.aclEntries = AclEntry.parseAclSpec(aclSpec, true);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.removeAclEntries(path, aclEntries);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that removes the default acl from a directory in a FileSystem
+   */
+  @InterfaceAudience.Private
+  public static class FSRemoveDefaultAcl implements FileSystemAccess.FileSystemExecutor<Void> {
+
+    private Path path;
+
+    /**
+     * Creates an executor for removing the default acl.
+     *
+     * @param path path from which to remove the default acl.
+     */
+    public FSRemoveDefaultAcl(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return void.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Void execute(FileSystem fs) throws IOException {
+      fs.removeDefaultAcl(path);
+      return null;
+    }
+
+  }
+
+  /**
+   * Executor that gets the ACL information for a given file.
+   */
+  @InterfaceAudience.Private
+  public static class FSAclStatus implements FileSystemAccess.FileSystemExecutor<Map> {
+    private Path path;
+
+    /**
+     * Creates an executor for getting the ACLs for a file.
+     *
+     * @param path the path to retrieve the ACLs.
+     */
+    public FSAclStatus(String path) {
+      this.path = new Path(path);
+    }
+
+    /**
+     * Executes the filesystem operation.
+     *
+     * @param fs filesystem instance to use.
+     *
+     * @return a Map object (JSON friendly) with the ACL status.
+     *
+     * @throws IOException thrown if an IO error occurred.
+     */
+    @Override
+    public Map execute(FileSystem fs) throws IOException {
+      AclStatus status = fs.getAclStatus(path);
+      return aclStatusToJSON(status);
+    }
+
+  }
+
   /**
    * Executor that performs a set-replication FileSystemAccess file system operation.
    */
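
All of the new ACL classes above follow the same executor pattern: parse and validate arguments in the constructor, and touch the FileSystem only inside execute(), which runs with a FileSystem already resolved for the proxied user. A minimal sketch of that pattern; the GetFileLength executor is hypothetical, not part of the patch.

  @InterfaceAudience.Private
  public static class GetFileLength
      implements FileSystemAccess.FileSystemExecutor<Long> {

    private final Path path;

    public GetFileLength(String path) {
      this.path = new Path(path);
    }

    @Override
    public Long execute(FileSystem fs) throws IOException {
      // All filesystem access is confined to execute().
      return fs.getFileStatus(path).getLen();
    }
  }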

+ 35 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java

@@ -33,12 +33,16 @@ import org.slf4j.MDC;
 import javax.ws.rs.ext.Provider;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.regex.Pattern;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
 
 /**
  * HttpFS ParametersProvider.
  */
 @Provider
 @InterfaceAudience.Private
+@SuppressWarnings("unchecked")
 public class HttpFSParametersProvider extends ParametersProvider {
 
   private static final Map<Enum, Class<Param<?>>[]> PARAMS_DEF =
@@ -55,6 +59,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
     PARAMS_DEF.put(Operation.GETFILECHECKSUM, new Class[]{DoAsParam.class});
     PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS,
       new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.GETACLSTATUS, new Class[]{DoAsParam.class});
     PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{DoAsParam.class});
     PARAMS_DEF.put(Operation.APPEND,
       new Class[]{DoAsParam.class, DataParam.class});
@@ -77,6 +82,16 @@ public class HttpFSParametersProvider extends ParametersProvider {
                   AccessTimeParam.class});
     PARAMS_DEF.put(Operation.DELETE,
       new Class[]{DoAsParam.class, RecursiveParam.class});
+    PARAMS_DEF.put(Operation.SETACL,
+            new Class[]{DoAsParam.class, AclPermissionParam.class});
+    PARAMS_DEF.put(Operation.REMOVEACL,
+            new Class[]{DoAsParam.class});
+    PARAMS_DEF.put(Operation.MODIFYACLENTRIES,
+            new Class[]{DoAsParam.class, AclPermissionParam.class});
+    PARAMS_DEF.put(Operation.REMOVEACLENTRIES,
+            new Class[]{DoAsParam.class, AclPermissionParam.class});
+    PARAMS_DEF.put(Operation.REMOVEDEFAULTACL,
+            new Class[]{DoAsParam.class});
   }
 
   public HttpFSParametersProvider() {
@@ -370,6 +385,26 @@ public class HttpFSParametersProvider extends ParametersProvider {
 
   }
 
+  /**
+   * Class for AclPermission parameter.
+   */
+  @InterfaceAudience.Private
+  public static class AclPermissionParam extends StringParam {
+
+    /**
+     * Parameter name.
+     */
+    public static final String NAME = HttpFSFileSystem.ACLSPEC_PARAM;
+
+    /**
+     * Constructor.
+     */
+    public AclPermissionParam() {
+      super(NAME, HttpFSFileSystem.ACLSPEC_DEFAULT,
+              Pattern.compile(DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT));
+    }
+  }
+
   /**
    * Class for replication parameter.
    */

+ 55 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AclPermissionParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
 import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
@@ -313,6 +314,14 @@ public class HttpFSServer {
         response = Response.status(Response.Status.BAD_REQUEST).build();
         break;
       }
+      case GETACLSTATUS: {
+        FSOperations.FSAclStatus command =
+                new FSOperations.FSAclStatus(path);
+        Map json = fsExecute(user, doAs, command);
+        AUDIT_LOG.info("ACL status for [{}]", path);
+        response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+        break;
+      }
       default: {
         throw new IOException(
           MessageFormat.format("Invalid HTTP GET operation [{0}]",
@@ -579,6 +588,52 @@ public class HttpFSServer {
         response = Response.ok().build();
         break;
       }
+      case SETACL: {
+        String aclSpec = params.get(AclPermissionParam.NAME,
+                AclPermissionParam.class);
+        FSOperations.FSSetAcl command =
+                new FSOperations.FSSetAcl(path, aclSpec);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] to acl [{}]", path, aclSpec);
+        response = Response.ok().build();
+        break;
+      }
+      case REMOVEACL: {
+        FSOperations.FSRemoveAcl command =
+                new FSOperations.FSRemoveAcl(path);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] removed acl", path);
+        response = Response.ok().build();
+        break;
+      }
+      case MODIFYACLENTRIES: {
+        String aclSpec = params.get(AclPermissionParam.NAME,
+                AclPermissionParam.class);
+        FSOperations.FSModifyAclEntries command =
+                new FSOperations.FSModifyAclEntries(path, aclSpec);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] modify acl entry with [{}]", path, aclSpec);
+        response = Response.ok().build();
+        break;
+      }
+      case REMOVEACLENTRIES: {
+        String aclSpec = params.get(AclPermissionParam.NAME,
+                AclPermissionParam.class);
+        FSOperations.FSRemoveAclEntries command =
+                new FSOperations.FSRemoveAclEntries(path, aclSpec);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] remove acl entry [{}]", path, aclSpec);
+        response = Response.ok().build();
+        break;
+      }
+      case REMOVEDEFAULTACL: {
+        FSOperations.FSRemoveDefaultAcl command =
+                new FSOperations.FSRemoveDefaultAcl(path);
+        fsExecute(user, doAs, command);
+        AUDIT_LOG.info("[{}] remove default acl", path);
+        response = Response.ok().build();
+        break;
+      }
       default: {
         throw new IOException(
           MessageFormat.format("Invalid HTTP PUT operation [{0}]",

+ 113 - 1
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java

@@ -26,6 +26,8 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -87,6 +89,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     String fsDefaultName = getProxiedFSURI();
     Configuration conf = new Configuration(false);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     File hdfsSite = new File(new File(homeDir, "conf"), "hdfs-site.xml");
     OutputStream os = new FileOutputStream(hdfsSite);
     conf.writeXml(os);
@@ -479,9 +482,112 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     Assert.assertEquals(httpContentSummary.getSpaceQuota(), hdfsContentSummary.getSpaceQuota());
   }
 
+  /**
+   * Runs assertions testing that two AclStatus objects contain the same info
+   * @param a First AclStatus
+   * @param b Second AclStatus
+   * @throws Exception
+   */
+  private void assertSameAcls(AclStatus a, AclStatus b) throws Exception {
+    Assert.assertTrue(a.getOwner().equals(b.getOwner()));
+    Assert.assertTrue(a.getGroup().equals(b.getGroup()));
+    Assert.assertTrue(a.isStickyBit() == b.isStickyBit());
+    Assert.assertTrue(a.getEntries().size() == b.getEntries().size());
+    for (AclEntry e : a.getEntries()) {
+      Assert.assertTrue(b.getEntries().contains(e));
+    }
+    for (AclEntry e : b.getEntries()) {
+      Assert.assertTrue(a.getEntries().contains(e));
+    }
+  }
+
+  /**
+   * Simple ACL tests on a file:  Set an acl, add an acl, remove one acl,
+   * and remove all acls.
+   * @throws Exception
+   */
+  private void testFileAcls() throws Exception {
+    if ( isLocalFS() ) {
+      return;
+    }
+
+    final String aclUser1 = "user:foo:rw-";
+    final String aclUser2 = "user:bar:r--";
+    final String aclGroup1 = "group::r--";
+    final String aclSet = "user::rwx," + aclUser1 + ","
+            + aclGroup1 + ",other::---";
+
+    FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
+    FileSystem httpfs = getHttpFSFileSystem();
+
+    Path path = new Path(getProxiedFSTestDir(), "testAclStatus.txt");
+    OutputStream os = proxyFs.create(path);
+    os.write(1);
+    os.close();
+
+    AclStatus proxyAclStat = proxyFs.getAclStatus(path);
+    AclStatus httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.setAcl(path, AclEntry.parseAclSpec(aclSet,true));
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.modifyAclEntries(path, AclEntry.parseAclSpec(aclUser2, true));
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.removeAclEntries(path, AclEntry.parseAclSpec(aclUser1, true));
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    httpfs.removeAcl(path);
+    proxyAclStat = proxyFs.getAclStatus(path);
+    httpfsAclStat = httpfs.getAclStatus(path);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+  }
+
+  /**
+   * Simple acl tests on a directory: set a default acl, remove default acls.
+   * @throws Exception
+   */
+  private void testDirAcls() throws Exception {
+    if ( isLocalFS() ) {
+      return;
+    }
+
+    final String defUser1 = "default:user:glarch:r-x";
+
+    FileSystem proxyFs = FileSystem.get(getProxiedFSConf());
+    FileSystem httpfs = getHttpFSFileSystem();
+
+    Path dir = getProxiedFSTestDir();
+
+    /* ACL Status on a directory */
+    AclStatus proxyAclStat = proxyFs.getAclStatus(dir);
+    AclStatus httpfsAclStat = httpfs.getAclStatus(dir);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    /* Set a default ACL on the directory */
+    httpfs.setAcl(dir, (AclEntry.parseAclSpec(defUser1,true)));
+    proxyAclStat = proxyFs.getAclStatus(dir);
+    httpfsAclStat = httpfs.getAclStatus(dir);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+
+    /* Remove the default ACL */
+    httpfs.removeDefaultAcl(dir);
+    proxyAclStat = proxyFs.getAclStatus(dir);
+    httpfsAclStat = httpfs.getAclStatus(dir);
+    assertSameAcls(httpfsAclStat, proxyAclStat);
+  }
+
   protected enum Operation {
     GET, OPEN, CREATE, APPEND, CONCAT, RENAME, DELETE, LIST_STATUS, WORKING_DIRECTORY, MKDIRS,
-    SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY
+    SET_TIMES, SET_PERMISSION, SET_OWNER, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY,
+    FILEACLS, DIRACLS
   }
 
   private void operation(Operation op) throws Exception {
@@ -533,6 +639,12 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
       case CONTENT_SUMMARY:
         testContentSummary();
         break;
+      case FILEACLS:
+        testFileAcls();
+        break;
+      case DIRACLS:
+        testDirAcls();
+        break;
     }
   }
 

+ 213 - 9
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServer.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs.http.server;
 
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.json.simple.JSONArray;
 import org.junit.Assert;
 
 import java.io.BufferedReader;
@@ -31,6 +33,7 @@ import java.io.Writer;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.text.MessageFormat;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
@@ -128,6 +131,7 @@ public class TestHttpFSServer extends HFSTestCase {
     String fsDefaultName = TestHdfsHelper.getHdfsConf().get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
     Configuration conf = new Configuration(false);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
     File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
     OutputStream os = new FileOutputStream(hdfsSite);
     conf.writeXml(os);
@@ -241,6 +245,10 @@ public class TestHttpFSServer extends HFSTestCase {
   private void createWithHttp ( String filename, String perms )
           throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if ( filename.charAt(0) == '/' ) {
+      filename = filename.substring(1);
+    }
     String pathOps;
     if ( perms == null ) {
       pathOps = MessageFormat.format(
@@ -260,18 +268,24 @@ public class TestHttpFSServer extends HFSTestCase {
   }
 
   /**
-   * Talks to the http interface to get the json output of the GETFILESTATUS
-   * command on the given file.
+   * Talks to the http interface to get the json output of a *STATUS command
+   * on the given file.
    *
    * @param filename The file to query.
+   * @param command Either GETFILESTATUS, LISTSTATUS, or GETACLSTATUS
    * @return A string containing the JSON output describing the file.
    * @throws Exception
    */
-  private String getFileStatus ( String filename ) throws Exception {
+  private String getStatus(String filename, String command)
+          throws Exception {
     String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if ( filename.charAt(0) == '/' ) {
+      filename = filename.substring(1);
+    }
     String pathOps = MessageFormat.format(
-            "/webhdfs/v1/{0}?user.name={1}&op=GETFILESTATUS",
-            filename, user);
+            "/webhdfs/v1/{0}?user.name={1}&op={2}",
+            filename, user, command);
     URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
     HttpURLConnection conn = (HttpURLConnection) url.openConnection();
     conn.connect();
@@ -283,6 +297,30 @@ public class TestHttpFSServer extends HFSTestCase {
     return reader.readLine();
   }
 
+  /**
+   * General-purpose http PUT command to the httpfs server.
+   * @param filename The file to operate upon
+   * @param command The command to perform (SETACL, etc)
+   * @param params Parameters, like "aclspec=..."
+   */
+  private void putCmd(String filename, String command,
+                      String params) throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if ( filename.charAt(0) == '/' ) {
+      filename = filename.substring(1);
+    }
+    String pathOps = MessageFormat.format(
+            "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
+            filename, user, (params == null) ? "" : "&",
+            (params == null) ? "" : params, command);
+    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    conn.connect();
+    Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
+  }
+
   /**
    * Given the JSON output from the GETFILESTATUS call, return the
    * 'permission' value.
@@ -298,6 +336,27 @@ public class TestHttpFSServer extends HFSTestCase {
     return (String) details.get("permission");
   }
 
+  /**
+   * Given the JSON output from the GETACLSTATUS call, return the
+   * 'entries' value as a List<String>.
+   * @param statusJson JSON from GETACLSTATUS
+   * @return A List of Strings which are the elements of the ACL entries
+   * @throws Exception
+   */
+  private List<String> getAclEntries ( String statusJson ) throws Exception {
+    List<String> entries = new ArrayList<String>();
+    JSONParser parser = new JSONParser();
+    JSONObject jsonObject = (JSONObject) parser.parse(statusJson);
+    JSONObject details = (JSONObject) jsonObject.get("AclStatus");
+    JSONArray jsonEntries = (JSONArray) details.get("entries");
+    if ( jsonEntries != null ) {
+      for (Object e : jsonEntries) {
+        entries.add(e.toString());
+      }
+    }
+    return entries;
+  }
+
   /**
    * Validate that files are created with 755 permissions when no
    * 'permissions' attribute is specified, and when 'permissions'
@@ -314,22 +373,167 @@ public class TestHttpFSServer extends HFSTestCase {
     fs.mkdirs(new Path("/perm"));
 
     createWithHttp("/perm/none", null);
-    String statusJson = getFileStatus("/perm/none");
+    String statusJson = getStatus("/perm/none", "GETFILESTATUS");
     Assert.assertTrue("755".equals(getPerms(statusJson)));
 
     createWithHttp("/perm/p-777", "777");
-    statusJson = getFileStatus("/perm/p-777");
+    statusJson = getStatus("/perm/p-777", "GETFILESTATUS");
     Assert.assertTrue("777".equals(getPerms(statusJson)));
 
     createWithHttp("/perm/p-654", "654");
-    statusJson = getFileStatus("/perm/p-654");
+    statusJson = getStatus("/perm/p-654", "GETFILESTATUS");
     Assert.assertTrue("654".equals(getPerms(statusJson)));
 
     createWithHttp("/perm/p-321", "321");
-    statusJson = getFileStatus("/perm/p-321");
+    statusJson = getStatus("/perm/p-321", "GETFILESTATUS");
     Assert.assertTrue("321".equals(getPerms(statusJson)));
   }
 
+  /**
+   * Validate the various ACL set/modify/remove calls.  General strategy is
+   * to verify each of the following steps with GETFILESTATUS, LISTSTATUS,
+   * and GETACLSTATUS:
+   * <ol>
+   *   <li>Create a file with no ACLs</li>
+   *   <li>Add a user + group ACL</li>
+   *   <li>Add another user ACL</li>
+   *   <li>Remove the first user ACL</li>
+   *   <li>Remove all ACLs</li>
+   * </ol>
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testFileAcls() throws Exception {
+    final String aclUser1 = "user:foo:rw-";
+    final String aclUser2 = "user:bar:r--";
+    final String aclGroup1 = "group::r--";
+    final String aclSpec = "aclspec=user::rwx," + aclUser1 + ","
+            + aclGroup1 + ",other::---";
+    final String modAclSpec = "aclspec=" + aclUser2;
+    final String remAclSpec = "aclspec=" + aclUser1;
+    final String dir = "/aclFileTest";
+    final String path = dir + "/test";
+    String statusJson;
+    List<String> aclEntries;
+
+    createHttpFSServer(false);
+
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path(dir));
+
+    createWithHttp(path, null);
+
+    /* getfilestatus and liststatus don't have 'aclBit' in their reply */
+    statusJson = getStatus(path, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "LISTSTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+
+    /* getaclstatus works and returns no entries */
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+
+    /*
+     * Now set an ACL on the file.  (getfile|list)status have aclBit,
+     * and aclstatus has entries that look familiar.
+     */
+    putCmd(path, "SETACL", aclSpec);
+    statusJson = getStatus(path, "GETFILESTATUS");
+    Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "LISTSTATUS");
+    Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 2);
+    Assert.assertTrue(aclEntries.contains(aclUser1));
+    Assert.assertTrue(aclEntries.contains(aclGroup1));
+
+    /* Modify acl entries to add another user acl */
+    putCmd(path, "MODIFYACLENTRIES", modAclSpec);
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 3);
+    Assert.assertTrue(aclEntries.contains(aclUser1));
+    Assert.assertTrue(aclEntries.contains(aclUser2));
+    Assert.assertTrue(aclEntries.contains(aclGroup1));
+
+    /* Remove the first user acl entry and verify */
+    putCmd(path, "REMOVEACLENTRIES", remAclSpec);
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 2);
+    Assert.assertTrue(aclEntries.contains(aclUser2));
+    Assert.assertTrue(aclEntries.contains(aclGroup1));
+
+    /* Remove all acls and verify */
+    putCmd(path, "REMOVEACL", null);
+    statusJson = getStatus(path, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+    statusJson = getStatus(path, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "LISTSTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+  }
+
+  /**
+   * Test ACL operations on a directory, including default ACLs.
+   * General strategy is to use GETFILESTATUS and GETACLSTATUS to verify:
+   * <ol>
+   *   <li>Initial status with no ACLs</li>
+   *   <li>The addition of a default ACL</li>
+   *   <li>The removal of default ACLs</li>
+   * </ol>
+   *
+   * @throws Exception
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  @TestHdfs
+  public void testDirAcls() throws Exception {
+    final String defUser1 = "default:user:glarch:r-x";
+    final String defSpec1 = "aclspec=" + defUser1;
+    final String dir = "/aclDirTest";
+    String statusJson;
+    List<String> aclEntries;
+
+    createHttpFSServer(false);
+
+    FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
+    fs.mkdirs(new Path(dir));
+
+    /* getfilestatus and liststatus don't have 'aclBit' in their reply */
+    statusJson = getStatus(dir, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+
+    /* No ACLs, either */
+    statusJson = getStatus(dir, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+
+    /* Give it a default ACL and verify */
+    putCmd(dir, "SETACL", defSpec1);
+    statusJson = getStatus(dir, "GETFILESTATUS");
+    Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 5);
+    /* 4 of the 5 entries are default:(user|group|mask|other):perm */
+    Assert.assertTrue(aclEntries.contains(defUser1));
+
+    /* Remove the default ACL and re-verify */
+    putCmd(dir, "REMOVEDEFAULTACL", null);
+    statusJson = getStatus(dir, "GETFILESTATUS");
+    Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
+    statusJson = getStatus(dir, "GETACLSTATUS");
+    aclEntries = getAclEntries(statusJson);
+    Assert.assertTrue(aclEntries.size() == 0);
+  }
+
   @Test
   @TestDir
   @TestJetty
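
The ACL tests above drive plain WebHDFS-over-HTTP requests; the putCmd/getStatus helpers assemble URLs of the form /webhdfs/v1/<path>?user.name=<user>[&<params>]&op=<OP>. A minimal standalone sketch of the same SETACL request (the port, user name, and aclspec value are illustrative, not taken from the tests):

    import java.net.HttpURLConnection;
    import java.net.URL;

    public class SetAclRequestSketch {
      public static void main(String[] args) throws Exception {
        // The tests resolve the endpoint via TestJettyHelper.getJettyURL();
        // a fixed localhost URL stands in for it here.
        URL base = new URL("http://localhost:14000");
        String path = "aclFileTest/test";  // leading '/' already stripped
        String aclspec = "aclspec=user::rwx,user:foo:rw-,group::r--,other::---";
        URL op = new URL(base, "/webhdfs/v1/" + path
            + "?user.name=foo&" + aclspec + "&op=SETACL");
        HttpURLConnection conn = (HttpURLConnection) op.openConnection();
        conn.setRequestMethod("PUT");  // all ACL mutations are HTTP PUTs
        conn.connect();
        System.out.println("SETACL response: " + conn.getResponseCode());  // 200 expected
      }
    }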

+ 283 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestHttpFSServerNoACLs.java

@@ -0,0 +1,283 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.test.HTestCase;
+import org.apache.hadoop.test.HadoopUsersConfTestHelper;
+import org.apache.hadoop.test.TestDir;
+import org.apache.hadoop.test.TestDirHelper;
+import org.apache.hadoop.test.TestJetty;
+import org.apache.hadoop.test.TestJettyHelper;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mortbay.jetty.Server;
+import org.mortbay.jetty.webapp.WebAppContext;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.net.HttpURLConnection;
+import java.net.URL;
+import java.text.MessageFormat;
+
+/**
+ * This test class ensures that everything works as expected when ACL
+ * support is turned off HDFS.  This is the default configuration.  The other
+ * tests operate with ACL support turned on.
+ */
+public class TestHttpFSServerNoACLs extends HTestCase {
+
+  private MiniDFSCluster miniDfs;
+  private Configuration nnConf;
+
+  /**
+   * Fire up our own hand-rolled MiniDFSCluster.  We do this here instead
+   * of relying on TestHdfsHelper because we don't want to turn on ACL
+   * support.
+   *
+   * @throws Exception
+   */
+  private void startMiniDFS() throws Exception {
+
+    File testDirRoot = TestDirHelper.getTestDir();
+
+    if (System.getProperty("hadoop.log.dir") == null) {
+      System.setProperty("hadoop.log.dir",
+              new File(testDirRoot, "hadoop-log").getAbsolutePath());
+    }
+    if (System.getProperty("test.build.data") == null) {
+      System.setProperty("test.build.data",
+              new File(testDirRoot, "hadoop-data").getAbsolutePath());
+    }
+
+    Configuration conf = HadoopUsersConfTestHelper.getBaseConf();
+    HadoopUsersConfTestHelper.addUserConf(conf);
+    conf.set("fs.hdfs.impl.disable.cache", "true");
+    conf.set("dfs.block.access.token.enable", "false");
+    conf.set("dfs.permissions", "true");
+    conf.set("hadoop.security.authentication", "simple");
+
+    // Explicitly turn off ACL support
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
+
+    MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
+    builder.numDataNodes(2);
+    miniDfs = builder.build();
+    nnConf = miniDfs.getConfiguration(0);
+  }
+
+  /**
+   * Create an HttpFS Server to talk to the MiniDFSCluster we created.
+   * @throws Exception
+   */
+  private void createHttpFSServer() throws Exception {
+    File homeDir = TestDirHelper.getTestDir();
+    Assert.assertTrue(new File(homeDir, "conf").mkdir());
+    Assert.assertTrue(new File(homeDir, "log").mkdir());
+    Assert.assertTrue(new File(homeDir, "temp").mkdir());
+    HttpFSServerWebApp.setHomeDirForCurrentThread(homeDir.getAbsolutePath());
+
+    File secretFile = new File(new File(homeDir, "conf"), "secret");
+    Writer w = new FileWriter(secretFile);
+    w.write("secret");
+    w.close();
+
+    // HDFS configuration
+    File hadoopConfDir = new File(new File(homeDir, "conf"), "hadoop-conf");
+    if (!hadoopConfDir.mkdirs()) {
+      throw new IOException("Unable to create " + hadoopConfDir);
+    }
+
+    String fsDefaultName =
+            nnConf.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY);
+    Configuration conf = new Configuration(false);
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsDefaultName);
+
+    // Explicitly turn off ACLs, just in case the default becomes true later
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
+
+    File hdfsSite = new File(hadoopConfDir, "hdfs-site.xml");
+    OutputStream os = new FileOutputStream(hdfsSite);
+    conf.writeXml(os);
+    os.close();
+
+    // HTTPFS configuration
+    conf = new Configuration(false);
+    conf.set("httpfs.hadoop.config.dir", hadoopConfDir.toString());
+    conf.set("httpfs.proxyuser." +
+                    HadoopUsersConfTestHelper.getHadoopProxyUser() + ".groups",
+            HadoopUsersConfTestHelper.getHadoopProxyUserGroups());
+    conf.set("httpfs.proxyuser." +
+                    HadoopUsersConfTestHelper.getHadoopProxyUser() + ".hosts",
+            HadoopUsersConfTestHelper.getHadoopProxyUserHosts());
+    conf.set("httpfs.authentication.signature.secret.file",
+            secretFile.getAbsolutePath());
+
+    File httpfsSite = new File(new File(homeDir, "conf"), "httpfs-site.xml");
+    os = new FileOutputStream(httpfsSite);
+    conf.writeXml(os);
+    os.close();
+
+    ClassLoader cl = Thread.currentThread().getContextClassLoader();
+    URL url = cl.getResource("webapp");
+    if (url == null) {
+      throw new IOException("Could not find webapp resource on the classpath");
+    }
+    WebAppContext context = new WebAppContext(url.getPath(), "/webhdfs");
+    Server server = TestJettyHelper.getJettyServer();
+    server.addHandler(context);
+    server.start();
+  }
+
+  /**
+   * Talks to the http interface to get the json output of a *STATUS command
+   * on the given file.
+   *
+   * @param filename The file to query.
+   * @param command Either GETFILESTATUS, LISTSTATUS, or GETACLSTATUS
+   * @param expectOK Is this operation expected to succeed?
+   * @throws Exception
+   */
+  private void getStatus(String filename, String command, boolean expectOK)
+          throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if (filename.charAt(0) == '/') {
+      filename = filename.substring(1);
+    }
+    String pathOps = MessageFormat.format(
+            "/webhdfs/v1/{0}?user.name={1}&op={2}",
+            filename, user, command);
+    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.connect();
+    int resp = conn.getResponseCode();
+    BufferedReader reader;
+    if (expectOK) {
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
+      reader = new BufferedReader(new InputStreamReader(conn.getInputStream()));
+      String res = reader.readLine();
+      Assert.assertTrue(!res.contains("aclBit"));
+      Assert.assertTrue(res.contains("owner")); // basic sanity check
+    } else {
+      Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
+      reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
+      String res = reader.readLine();
+      Assert.assertTrue(res.contains("RemoteException"));
+      Assert.assertTrue(res.contains("ACL"));
+      Assert.assertTrue(res.contains("rejected"));
+    }
+  }
+
+  /**
+   * General-purpose http PUT command to the httpfs server.
+   * @param filename The file to operate upon
+   * @param command The command to perform (SETACL, etc)
+   * @param params Parameters, like "aclspec=..."
+   */
+  private void putCmd(String filename, String command,
+                      String params, boolean expectOK) throws Exception {
+    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
+    // Remove leading / from filename
+    if (filename.charAt(0) == '/') {
+      filename = filename.substring(1);
+    }
+    String pathOps = MessageFormat.format(
+            "/webhdfs/v1/{0}?user.name={1}{2}{3}&op={4}",
+            filename, user, (params == null) ? "" : "&",
+            (params == null) ? "" : params, command);
+    URL url = new URL(TestJettyHelper.getJettyURL(), pathOps);
+    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+    conn.setRequestMethod("PUT");
+    conn.connect();
+    int resp = conn.getResponseCode();
+    if (expectOK) {
+      Assert.assertEquals(HttpURLConnection.HTTP_OK, resp);
+    } else {
+      Assert.assertEquals(HttpURLConnection.HTTP_INTERNAL_ERROR, resp);
+      BufferedReader reader;
+      reader = new BufferedReader(new InputStreamReader(conn.getErrorStream()));
+      String err = reader.readLine();
+      Assert.assertTrue(err.contains("RemoteException"));
+      Assert.assertTrue(err.contains("ACL"));
+      Assert.assertTrue(err.contains("rejected"));
+    }
+  }
+
+  /**
+   * Ensure that
+   * <ol>
+   *   <li>GETFILESTATUS and LISTSTATUS work happily</li>
+   *   <li>GETACLSTATUS throws an exception</li>
+   *   <li>The ACL SET, REMOVE, etc. calls all fail</li>
+   * </ol>
+   *
+   * @throws Exception
+   */
+  @Test
+  @TestDir
+  @TestJetty
+  public void testWithNoAcls() throws Exception {
+    final String aclUser1 = "user:foo:rw-";
+    final String aclUser2 = "user:bar:r--";
+    final String aclGroup1 = "group::r--";
+    final String aclSpec = "aclspec=user::rwx," + aclUser1 + ","
+            + aclGroup1 + ",other::---";
+    final String modAclSpec = "aclspec=" + aclUser2;
+    final String remAclSpec = "aclspec=" + aclUser1;
+    final String defUser1 = "default:user:glarch:r-x";
+    final String defSpec1 = "aclspec=" + defUser1;
+    final String dir = "/noACLs";
+    final String path = dir + "/foo";
+
+    startMiniDFS();
+    createHttpFSServer();
+
+    FileSystem fs = FileSystem.get(nnConf);
+    fs.mkdirs(new Path(dir));
+    OutputStream os = fs.create(new Path(path));
+    os.write(1);
+    os.close();
+
+    /* The normal status calls work as expected; GETACLSTATUS fails */
+    getStatus(path, "GETFILESTATUS", true);
+    getStatus(dir, "LISTSTATUS", true);
+    getStatus(path, "GETACLSTATUS", false);
+
+    /* All the ACL-based PUT commands fail with ACL exceptions */
+    putCmd(path, "SETACL", aclSpec, false);
+    putCmd(path, "MODIFYACLENTRIES", modAclSpec, false);
+    putCmd(path, "REMOVEACLENTRIES", remAclSpec, false);
+    putCmd(path, "REMOVEACL", null, false);
+    putCmd(dir, "SETACL", defSpec1, false);
+    putCmd(dir, "REMOVEDEFAULTACL", null, false);
+
+    miniDfs.shutdown();
+  }
+}
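
Everything in this class hinges on a single switch: dfs.namenode.acls.enabled (DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY), which is false by default. A minimal sketch of bringing up an ACL-disabled MiniDFSCluster outside the test harness (node count and the printed URI are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class NoAclMiniClusterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // ACLs are off by default; setting the key explicitly guards
        // against the default ever changing (as createHttpFSServer does).
        conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, false);
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2)
            .build();
        try {
          // Any SETACL/GETACLSTATUS request against this cluster is rejected
          // with an AclException wrapped in a RemoteException.
          System.out.println("NameNode at " + cluster.getURI());
        } finally {
          cluster.shutdown();
        }
      }
    }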

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/test/TestHdfsHelper.java

@@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.Test;
 import org.junit.runners.model.FrameworkMethod;
@@ -145,6 +146,7 @@ public class TestHdfsHelper extends TestDirHelper {
       conf.set("dfs.block.access.token.enable", "false");
       conf.set("dfs.permissions", "true");
       conf.set("hadoop.security.authentication", "simple");
+      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
       MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
       builder.numDataNodes(2);
       MiniDFSCluster miniHdfs = builder.build();

+ 112 - 67
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -254,72 +254,6 @@ Trunk (Unreleased)
     HDFS-5794. Fix the inconsistency of layout version number of 
     ADD_DATANODE_AND_STORAGE_UUIDS between trunk and branch-2. (jing9)
 
-    HDFS-6464. Support multiple xattr.name parameters for WebHDFS getXAttrs.
-    (Yi Liu via umamahesh)
-
-  BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
-
-    HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
-
-    HDFS-6302. Implement XAttr as a INode feature. (Yi Liu via umamahesh)
-
-    HDFS-6309. Javadocs for Xattrs apis in DFSClient and other minor fixups. (Charles Lamb via umamahesh)
-
-    HDFS-6258. Namenode server-side storage for XAttrs. (Yi Liu via umamahesh)
-
-    HDFS-6303. HDFS implementation of FileContext API for XAttrs. (Yi Liu and Charles Lamb via umamahesh)
-
-    HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh)
-
-    HDFS-6301. NameNode: persist XAttrs in fsimage and record XAttrs modifications to edit log.
-    (Yi Liu via umamahesh)
-
-    HDFS-6298. XML based End-to-End test for getfattr and setfattr commands. (Yi Liu via umamahesh)
-
-    HDFS-6314. Test cases for XAttrs. (Yi Liu via umamahesh)
-
-    HDFS-6344. Maximum limit on the size of an xattr. (Yi Liu via umamahesh)
-
-    HDFS-6377. Unify xattr name and value limits into a single limit. (wang)
-
-    HDFS-6373. Remove support for extended attributes on symlinks. (Charles Lamb via wang)
-
-    HDFS-6283. Write end user documentation for xattrs. (wang)
-
-    HDFS-6412. Interface audience and stability annotations missing from
-    several new classes related to xattrs. (wang)
-
-    HDFS-6259. Support extended attributes via WebHDFS. (yliu)
-
-    HDFS-6346. Optimize OP_SET_XATTRS by persisting single Xattr entry per setXattr/removeXattr api call
-    (Yi Liu via umamahesh)
-
-    HDFS-6331. ClientProtocol#setXattr should not be annotated idempotent.
-    (umamahesh via wang)
-
-    HDFS-6335. TestOfflineEditsViewer for XAttr. (Yi Liu via umamahesh)
-
-    HDFS-6343. fix TestNamenodeRetryCache and TestRetryCacheWithHA failures. (umamahesh)
-
-    HDFS-6366. FsImage loading failed with RemoveXattr op (umamahesh)
-
-    HDFS-6357. SetXattr should persist rpcIDs for handling retrycache with Namenode restart and HA
-    (umamahesh)
-
-    HDFS-6372. Handle setXattr rpcIDs for OfflineEditsViewer. (umamahesh)
-
-    HDFS-6410. DFSClient unwraps AclException in xattr methods, but those
-    methods cannot throw AclException. (wang)
-
-    HDFS-6413. xattr names erroneously handled as case-insensitive.
-    (Charles Lamb via cnauroth)
-
-    HDFS-6414. xattr modification operations are based on state of latest
-    snapshot instead of current version of inode. (Andrew Wang via cnauroth)
-
-    HDFS-6374. setXAttr should require the user to be the owner of the file
-    or directory (Charles Lamb via wang)
-
     HDFS-6375. Listing extended attributes with the search permission.
     (Charles Lamb via wang)
 
@@ -485,6 +419,39 @@ Release 2.5.0 - UNRELEASED
     HDFS-6399. Add note about setfacl in HDFS permissions guide.
     (cnauroth via wang)
 
+    HDFS-6315. Decouple recording edit logs from FSDirectory. (wheat9)
+
+    HDFS-6379. HTTPFS - Implement ACLs support. (yoderme via tucu)
+
+    HDFS-6471. Make moveFromLocal CLI testcases to be non-disruptive
+    (Dasha Boudnik via cos)
+
+    HDFS-6395. Skip checking xattr limits for non-user-visible namespaces.
+    (Yi Liu via wang).
+
+    HDFS-3493. Invalidate excess corrupted blocks as long as minimum
+    replication is satisfied. (Juan Yu and Vinayakumar B via wang)
+
+    HDFS-6330. Move mkdirs() to FSNamesystem. (wheat9)
+
+    HDFS-6470. TestBPOfferService.testBPInitErrorHandling is flaky.
+    (Ming Ma via wang)
+
+    HDFS-6529. Trace logging for RemoteBlockReader2 to identify remote datanode
+    and file being read. (Anubhav Dhoot via atm)
+
+    HDFS-6499. Use NativeIO#renameTo instead of File#renameTo in
+    FileJournalManager. (Yongjun Zhang via atm)
+
+    HDFS-6518. TestCacheDirectives#testExceedsCapacity should
+    take FSN read lock when accessing pendingCached list.
+    (wang)
+
+    HDFS-6528. Add XAttrs to TestOfflineImageViewer. (Stephen Chu via wang)
+
+    HDFS-6545. Finalizing rolling upgrade can make NN unavailable for a long
+    duration. (kihwal)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
@@ -669,7 +636,85 @@ Release 2.5.0 - UNRELEASED
     HDFS-6364. Incorrect check for unknown datanode in Balancer. (Benoy
     Antony via Arpit Agarwal)
 
-Release 2.4.1 - UNRELEASED
+    HDFS-6503. Fix typo of DFSAdmin restoreFailedStorage.
+    (Zesheng Wu via wheat9)
+
+    HDFS-6464. Support multiple xattr.name parameters for WebHDFS getXAttrs.
+    (Yi Liu via umamahesh)
+
+    HDFS-6375. Listing extended attributes with the search permission.
+    (Charles Lamb via wang)
+
+    HDFS-6539. test_native_mini_dfs is skipped in hadoop-hdfs/pom.xml
+    (decstery via cmccabe)
+
+    HDFS-6527. Edit log corruption due to deferred INode removal. (kihwal and
+    jing9 via jing9)
+
+  BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
+
+    HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)
+
+    HDFS-6302. Implement XAttr as a INode feature. (Yi Liu via umamahesh)
+
+    HDFS-6309. Javadocs for Xattrs apis in DFSClient and other minor fixups. (Charles Lamb via umamahesh)
+
+    HDFS-6258. Namenode server-side storage for XAttrs. (Yi Liu via umamahesh)
+
+    HDFS-6303. HDFS implementation of FileContext API for XAttrs. (Yi Liu and Charles Lamb via umamahesh)
+
+    HDFS-6324. Shift XAttr helper code out for reuse. (Yi Liu via umamahesh)
+
+    HDFS-6301. NameNode: persist XAttrs in fsimage and record XAttrs modifications to edit log.
+    (Yi Liu via umamahesh)
+
+    HDFS-6298. XML based End-to-End test for getfattr and setfattr commands. (Yi Liu via umamahesh)
+
+    HDFS-6314. Test cases for XAttrs. (Yi Liu via umamahesh)
+
+    HDFS-6344. Maximum limit on the size of an xattr. (Yi Liu via umamahesh)
+
+    HDFS-6377. Unify xattr name and value limits into a single limit. (wang)
+
+    HDFS-6373. Remove support for extended attributes on symlinks. (Charles Lamb via wang)
+
+    HDFS-6283. Write end user documentation for xattrs. (wang)
+
+    HDFS-6412. Interface audience and stability annotations missing from
+    several new classes related to xattrs. (wang)
+
+    HDFS-6259. Support extended attributes via WebHDFS. (yliu)
+
+    HDFS-6346. Optimize OP_SET_XATTRS by persisting single Xattr entry per setXattr/removeXattr api call
+    (Yi Liu via umamahesh)
+
+    HDFS-6331. ClientProtocol#setXattr should not be annotated idempotent.
+    (umamahesh via wang)
+
+    HDFS-6335. TestOfflineEditsViewer for XAttr. (Yi Liu via umamahesh)
+
+    HDFS-6343. fix TestNamenodeRetryCache and TestRetryCacheWithHA failures. (umamahesh)
+
+    HDFS-6366. FsImage loading failed with RemoveXattr op (umamahesh)
+
+    HDFS-6357. SetXattr should persist rpcIDs for handling retrycache with Namenode restart and HA
+    (umamahesh)
+
+    HDFS-6372. Handle setXattr rpcIDs for OfflineEditsViewer. (umamahesh)
+
+    HDFS-6410. DFSClient unwraps AclException in xattr methods, but those
+    methods cannot throw AclException. (wang)
+
+    HDFS-6413. xattr names erroneously handled as case-insensitive.
+    (Charles Lamb via cnauroth)
+
+    HDFS-6414. xattr modification operations are based on state of latest
+    snapshot instead of current version of inode. (Andrew Wang via cnauroth)
+
+    HDFS-6374. setXAttr should require the user to be the owner of the file
+    or directory (Charles Lamb via wang)
+
+Release 2.4.1 - 2014-06-23 
 
   INCOMPATIBLE CHANGES
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -408,7 +408,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
                     </exec>
                     <exec executable="sh" failonerror="true" dir="${project.build.directory}/native/">
                         <arg value="-c"/>
-                        <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_libhdfs_threaded"/>
+                        <arg value="[ x$SKIPTESTS = xtrue ] || ${project.build.directory}/native/test_native_mini_dfs"/>
                       <env key="CLASSPATH" value="${test_classpath}:${compile_classpath}"/>
                       <env key="SKIPTESTS" value="${skipTests}"/>
                     </exec>

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.GenericRefreshProtocol;
 
 /**
  * {@link PolicyProvider} for HDFS protocols.
@@ -68,7 +69,10 @@ public class HDFSPolicyProvider extends PolicyProvider {
         GetUserMappingsProtocol.class),
     new Service(
         CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_CALLQUEUE,
-        RefreshCallQueueProtocol.class)
+        RefreshCallQueueProtocol.class),
+    new Service(
+        CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_GENERIC_REFRESH,
+        GenericRefreshProtocol.class)
   };
   
   @Override

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java

@@ -26,6 +26,7 @@ import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.nio.channels.ReadableByteChannel;
 import java.util.EnumSet;
+import java.util.UUID;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -133,9 +134,22 @@ public class RemoteBlockReader2  implements BlockReader {
   public synchronized int read(byte[] buf, int off, int len) 
                                throws IOException {
 
+    UUID randomId = null;
+    if (LOG.isTraceEnabled()) {
+      randomId = UUID.randomUUID();
+      LOG.trace(String.format("Starting read #%s file %s from datanode %s",
+        randomId.toString(), this.filename,
+        this.datanodeID.getHostName()));
+    }
+
     if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
       readNextPacket();
     }
+
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Finishing read #%s", randomId));
+    }
+
     if (curDataSlice.remaining() == 0) {
       // we're at EOF now
       return -1;
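
The two trace statements bracket each read with a shared random ID so that start/finish pairs for a given file and datanode can be matched up in the client log. The pattern in isolation, with the format string corrected as above (class and method names here are illustrative):

    import java.util.UUID;
    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class TraceGuardSketch {
      private static final Log LOG = LogFactory.getLog(TraceGuardSketch.class);

      int read(byte[] buf) {
        UUID readId = null;
        if (LOG.isTraceEnabled()) {  // skip UUID allocation and formatting when trace is off
          readId = UUID.randomUUID();
          LOG.trace(String.format("Starting read #%s", readId));
        }
        int n = doRead(buf);         // stand-in for the actual packet read
        if (LOG.isTraceEnabled()) {
          LOG.trace(String.format("Finishing read #%s", readId));
        }
        return n;
      }

      private int doRead(byte[] buf) {
        return buf.length;
      }
    }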

+ 22 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1096,8 +1096,9 @@ public class BlockManager {
           + blk + " not found");
       return;
     }
-    markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock, reason,
-        Reason.CORRUPTION_REPORTED), dn, storageID);
+    markBlockAsCorrupt(new BlockToMarkCorrupt(storedBlock,
+        blk.getGenerationStamp(), reason, Reason.CORRUPTION_REPORTED),
+        dn, storageID);
   }
 
   private void markBlockAsCorrupt(BlockToMarkCorrupt b,
@@ -1123,7 +1124,25 @@ public class BlockManager {
     // Add this replica to corruptReplicas Map
     corruptReplicas.addToCorruptReplicasMap(b.corrupted, node, b.reason,
         b.reasonCode);
-    if (countNodes(b.stored).liveReplicas() >= bc.getBlockReplication()) {
+
+    NumberReplicas numberOfReplicas = countNodes(b.stored);
+    boolean hasEnoughLiveReplicas = numberOfReplicas.liveReplicas() >= bc
+        .getBlockReplication();
+    boolean minReplicationSatisfied =
+        numberOfReplicas.liveReplicas() >= minReplication;
+    boolean hasMoreCorruptReplicas = minReplicationSatisfied &&
+        (numberOfReplicas.liveReplicas() + numberOfReplicas.corruptReplicas()) >
+        bc.getBlockReplication();
+    boolean corruptedDuringWrite = minReplicationSatisfied &&
+        (b.stored.getGenerationStamp() > b.corrupted.getGenerationStamp());
+    // case 1: have enough number of live replicas
+    // case 2: corrupted replicas + live replicas > Replication factor
+    // case 3: Block is marked corrupt due to failure while writing. In this
+    //         case genstamp will be different than that of valid block.
+    // In all these cases we can delete the replica.
+    // In case of 3, rbw block will be deleted and valid block can be replicated
+    if (hasEnoughLiveReplicas || hasMoreCorruptReplicas
+        || corruptedDuringWrite) {
       // the block is over-replicated so invalidate the replicas immediately
       invalidateBlock(b, node);
     } else if (namesystem.isPopulatingReplQueues()) {
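
The three cases enumerated in the comment can be read as a standalone predicate; the worked example in main() below uses replication factor 3 and a minimum replication of 1 (a sketch of the decision only, not the BlockManager API):

    public class CorruptReplicaInvalidationSketch {
      /** Mirrors the invalidation decision in BlockManager#markBlockAsCorrupt. */
      static boolean canInvalidate(int live, int corrupt, int replication,
                                   int minReplication,
                                   long storedGenStamp, long corruptGenStamp) {
        boolean hasEnoughLiveReplicas = live >= replication;            // case 1
        boolean minSatisfied = live >= minReplication;
        boolean hasMoreCorruptReplicas =
            minSatisfied && (live + corrupt) > replication;             // case 2
        boolean corruptedDuringWrite =
            minSatisfied && storedGenStamp > corruptGenStamp;           // case 3
        return hasEnoughLiveReplicas || hasMoreCorruptReplicas
            || corruptedDuringWrite;
      }

      public static void main(String[] args) {
        // live=2, corrupt=2, replication=3, minReplication=1:
        // (2 + 2) > 3, so case 2 applies and the corrupt replica is
        // invalidated even though we are below the replication factor.
        System.out.println(canInvalidate(2, 2, 3, 1, 10L, 10L));  // true
      }
    }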

+ 59 - 300
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
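
Most of this file's diff is HDFS-6315: FSDirectory methods stop writing the edit log themselves and instead return what changed, leaving FSNamesystem to decide what to log. A toy model of the split (names and types simplified; not the real classes):

    public class EditLogDecouplingSketch {
      interface EditLog {
        void logSetQuota(String src, long ns, long ds);
      }

      static class Directory {
        /** After the refactor: mutate only, report whether anything changed. */
        boolean setQuota(String src, long ns, long ds) {
          // ... apply the quota under the directory write lock ...
          return true;  // true means "something changed"
        }
      }

      static class Namesystem {
        final Directory dir = new Directory();
        final EditLog editLog = new EditLog() {
          public void logSetQuota(String src, long ns, long ds) {
            System.out.println("logSetQuota " + src + " " + ns + " " + ds);
          }
        };

        void setQuota(String src, long ns, long ds) {
          if (dir.setQuota(src, ns, ds)) {
            editLog.logSetQuota(src, ns, ds);  // logging now lives with the caller
          }
        }
      }

      public static void main(String[] args) {
        new Namesystem().setQuota("/dir", 100, 1000000);
      }
    }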

@@ -50,7 +50,6 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
@@ -265,11 +264,6 @@ public class FSDirectory implements Closeable {
     ready = flag;
   }
 
-  private void incrDeletedFileCount(long count) {
-    if (getFSNamesystem() != null)
-      NameNode.getNameNodeMetrics().incrFilesDeleted(count);
-  }
-    
   /**
    * Shutdown the filestore
    */
@@ -321,19 +315,7 @@ public class FSDirectory implements Closeable {
       UnresolvedLinkException, SnapshotAccessControlException, AclException {
     waitForReady();
 
-    // Always do an implicit mkdirs for parent directory tree.
     long modTime = now();
-    
-    Path parent = new Path(path).getParent();
-    if (parent == null) {
-      // Trying to add "/" as a file - this path has no
-      // parent -- avoids an NPE below.
-      return null;
-    }
-    
-    if (!mkdirs(parent.toString(), permissions, true, modTime)) {
-      return null;
-    }
     INodeFile newNode = new INodeFile(namesystem.allocateNewInodeId(), null,
         permissions, modTime, modTime, BlockInfo.EMPTY_ARRAY, replication,
         preferredBlockSize);
@@ -436,65 +418,6 @@ public class FSDirectory implements Closeable {
     }
   }
 
-  /**
-   * Persist the block list for the inode.
-   */
-  void persistBlocks(String path, INodeFile file, boolean logRetryCache) {
-    Preconditions.checkArgument(file.isUnderConstruction());
-    waitForReady();
-
-    writeLock();
-    try {
-      fsImage.getEditLog().logUpdateBlocks(path, file, logRetryCache);
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.persistBlocks: "
-            +path+" with "+ file.getBlocks().length 
-            +" blocks is persisted to the file system");
-      }
-    } finally {
-      writeUnlock();
-    }
-  }
-  
-  /**
-   * Persist the new block (the last block of the given file).
-   */
-  void persistNewBlock(String path, INodeFile file) {
-    Preconditions.checkArgument(file.isUnderConstruction());
-    waitForReady();
-
-    writeLock();
-    try {
-      fsImage.getEditLog().logAddBlock(path, file);
-    } finally {
-      writeUnlock();
-    }
-    if (NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.persistNewBlock: "
-          + path + " with new block " + file.getLastBlock().toString()
-          + ", current total block count is " + file.getBlocks().length);
-    }
-  }
-  
-  /**
-   * Close file.
-   */
-  void closeFile(String path, INodeFile file) {
-    waitForReady();
-    writeLock();
-    try {
-      // file is closed
-      fsImage.getEditLog().logCloseFile(path, file);
-      if (NameNode.stateChangeLog.isDebugEnabled()) {
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.closeFile: "
-            +path+" with "+ file.getBlocks().length 
-            +" blocks is persisted to the file system");
-      }
-    } finally {
-      writeUnlock();
-    }
-  }
-
   /**
    * Remove a block from the file.
    * @return Whether the block exists in the corresponding file
@@ -540,7 +463,7 @@ public class FSDirectory implements Closeable {
    * @deprecated Use {@link #renameTo(String, String, boolean, Rename...)}
    */
   @Deprecated
-  boolean renameTo(String src, String dst, boolean logRetryCache) 
+  boolean renameTo(String src, String dst, long mtime)
       throws QuotaExceededException, UnresolvedLinkException, 
       FileAlreadyExistsException, SnapshotAccessControlException, IOException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -548,22 +471,20 @@ public class FSDirectory implements Closeable {
           +src+" to "+dst);
     }
     waitForReady();
-    long now = now();
     writeLock();
     try {
-      if (!unprotectedRenameTo(src, dst, now))
+      if (!unprotectedRenameTo(src, dst, mtime))
         return false;
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logRename(src, dst, now, logRetryCache);
     return true;
   }
 
   /**
    * @see #unprotectedRenameTo(String, String, long, Options.Rename...)
    */
-  void renameTo(String src, String dst, boolean logRetryCache, 
+  void renameTo(String src, String dst, long mtime,
       Options.Rename... options)
       throws FileAlreadyExistsException, FileNotFoundException,
       ParentNotDirectoryException, QuotaExceededException,
@@ -573,16 +494,14 @@ public class FSDirectory implements Closeable {
           + " to " + dst);
     }
     waitForReady();
-    long now = now();
     writeLock();
     try {
-      if (unprotectedRenameTo(src, dst, now, options)) {
-        incrDeletedFileCount(1);
+      if (unprotectedRenameTo(src, dst, mtime, options)) {
+        namesystem.incrDeletedFileCount(1);
       }
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logRename(src, dst, now, logRetryCache, options);
   }
 
   /**
@@ -1106,11 +1025,7 @@ public class FSDirectory implements Closeable {
     waitForReady();
     writeLock();
     try {
-      final Block[] fileBlocks = unprotectedSetReplication(
-          src, replication, blockRepls);
-      if (fileBlocks != null)  // log replication change
-        fsImage.getEditLog().logSetReplication(src, replication);
-      return fileBlocks;
+      return unprotectedSetReplication(src, replication, blockRepls);
     } finally {
       writeUnlock();
     }
@@ -1178,7 +1093,6 @@ public class FSDirectory implements Closeable {
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logSetPermissions(src, permission);
   }
   
   void unprotectedSetPermission(String src, FsPermission permissions)
@@ -1203,7 +1117,6 @@ public class FSDirectory implements Closeable {
     } finally {
       writeUnlock();
     }
-    fsImage.getEditLog().logSetOwner(src, username, groupname);
   }
 
   void unprotectedSetOwner(String src, String username, String groupname)
@@ -1226,18 +1139,14 @@ public class FSDirectory implements Closeable {
   /**
    * Concat all the blocks from srcs to trg and delete the srcs files
    */
-  void concat(String target, String [] srcs, boolean supportRetryCache) 
+  void concat(String target, String[] srcs, long timestamp)
       throws UnresolvedLinkException, QuotaExceededException,
       SnapshotAccessControlException, SnapshotException {
     writeLock();
     try {
       // actual move
       waitForReady();
-      long timestamp = now();
       unprotectedConcat(target, srcs, timestamp);
-      // do the commit
-      fsImage.getEditLog().logConcat(target, srcs, timestamp, 
-          supportRetryCache);
     } finally {
       writeUnlock();
     }
@@ -1312,17 +1221,14 @@ public class FSDirectory implements Closeable {
    * @param src Path of a directory to delete
    * @param collectedBlocks Blocks under the deleted directory
    * @param removedINodes INodes that should be removed from {@link #inodeMap}
-   * @param logRetryCache Whether to record RPC IDs in editlog to support retry
-   *                      cache rebuilding.
-   * @return true on successful deletion; else false
+   * @return the number of files that have been removed
    */
-  boolean delete(String src, BlocksMapUpdateInfo collectedBlocks,
-      List<INode> removedINodes, boolean logRetryCache) throws IOException {
+  long delete(String src, BlocksMapUpdateInfo collectedBlocks,
+              List<INode> removedINodes, long mtime) throws IOException {
     if (NameNode.stateChangeLog.isDebugEnabled()) {
       NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: " + src);
     }
     waitForReady();
-    long now = now();
     final long filesRemoved;
     writeLock();
     try {
@@ -1335,20 +1241,13 @@ public class FSDirectory implements Closeable {
             new ArrayList<INodeDirectorySnapshottable>();
         checkSnapshot(inodesInPath.getLastINode(), snapshottableDirs);
         filesRemoved = unprotectedDelete(inodesInPath, collectedBlocks,
-            removedINodes, now);
+            removedINodes, mtime);
         namesystem.removeSnapshottableDirs(snapshottableDirs);
       }
     } finally {
       writeUnlock();
     }
-    if (filesRemoved < 0) {
-      return false;
-    }
-    fsImage.getEditLog().logDelete(src, now, logRetryCache);
-    incrDeletedFileCount(filesRemoved);
-    // Blocks/INodes will be handled later by the caller of this method
-    getFSNamesystem().removePathAndBlocks(src, null, null);
-    return true;
+    return filesRemoved;
   }
   
   private static boolean deleteAllowed(final INodesInPath iip,
@@ -1895,112 +1794,6 @@ public class FSDirectory implements Closeable {
     // inodes can be null only when its called without holding lock
     return inodes == null ? "" : getFullPathName(inodes, inodes.length - 1);
   }
-  
-  /**
-   * Create a directory 
-   * If ancestor directories do not exist, automatically create them.
-
-   * @param src string representation of the path to the directory
-   * @param permissions the permission of the directory
-   * @param inheritPermission if the permission of the directory should inherit
-   *                          from its parent or not. u+wx is implicitly added to
-   *                          the automatically created directories, and to the
-   *                          given directory if inheritPermission is true
-   * @param now creation time
-   * @return true if the operation succeeds false otherwise
-   * @throws QuotaExceededException if directory creation violates
-   *                                any quota limit
-   * @throws UnresolvedLinkException if a symlink is encountered in src.                      
-   * @throws SnapshotAccessControlException if path is in RO snapshot
-   */
-  boolean mkdirs(String src, PermissionStatus permissions,
-      boolean inheritPermission, long now)
-      throws FileAlreadyExistsException, QuotaExceededException, 
-             UnresolvedLinkException, SnapshotAccessControlException,
-             AclException {
-    src = normalizePath(src);
-    String[] names = INode.getPathNames(src);
-    byte[][] components = INode.getPathComponents(names);
-    final int lastInodeIndex = components.length - 1;
-
-    writeLock();
-    try {
-      INodesInPath iip = getExistingPathINodes(components);
-      if (iip.isSnapshot()) {
-        throw new SnapshotAccessControlException(
-            "Modification on RO snapshot is disallowed");
-      }
-      INode[] inodes = iip.getINodes();
-
-      // find the index of the first null in inodes[]
-      StringBuilder pathbuilder = new StringBuilder();
-      int i = 1;
-      for(; i < inodes.length && inodes[i] != null; i++) {
-        pathbuilder.append(Path.SEPARATOR).append(names[i]);
-        if (!inodes[i].isDirectory()) {
-          throw new FileAlreadyExistsException("Parent path is not a directory: "
-              + pathbuilder+ " "+inodes[i].getLocalName());
-        }
-      }
-
-      // default to creating parent dirs with the given perms
-      PermissionStatus parentPermissions = permissions;
-
-      // if not inheriting and it's the last inode, there's no use in
-      // computing perms that won't be used
-      if (inheritPermission || (i < lastInodeIndex)) {
-        // if inheriting (ie. creating a file or symlink), use the parent dir,
-        // else the supplied permissions
-        // NOTE: the permissions of the auto-created directories violate posix
-        FsPermission parentFsPerm = inheritPermission
-            ? inodes[i-1].getFsPermission() : permissions.getPermission();
-        
-        // ensure that the permissions allow user write+execute
-        if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
-          parentFsPerm = new FsPermission(
-              parentFsPerm.getUserAction().or(FsAction.WRITE_EXECUTE),
-              parentFsPerm.getGroupAction(),
-              parentFsPerm.getOtherAction()
-          );
-        }
-        
-        if (!parentPermissions.getPermission().equals(parentFsPerm)) {
-          parentPermissions = new PermissionStatus(
-              parentPermissions.getUserName(),
-              parentPermissions.getGroupName(),
-              parentFsPerm
-          );
-          // when inheriting, use same perms for entire path
-          if (inheritPermission) permissions = parentPermissions;
-        }
-      }
-      
-      // create directories beginning from the first null index
-      for(; i < inodes.length; i++) {
-        pathbuilder.append(Path.SEPARATOR).append(names[i]);
-        unprotectedMkdir(namesystem.allocateNewInodeId(), iip, i,
-            components[i], (i < lastInodeIndex) ? parentPermissions
-                : permissions, null, now);
-        if (inodes[i] == null) {
-          return false;
-        }
-        // Directory creation also count towards FilesCreated
-        // to match count of FilesDeleted metric.
-        if (getFSNamesystem() != null)
-          NameNode.getNameNodeMetrics().incrFilesCreated();
-
-        final String cur = pathbuilder.toString();
-        fsImage.getEditLog().logMkDir(cur, inodes[i]);
-        if(NameNode.stateChangeLog.isDebugEnabled()) {
-          NameNode.stateChangeLog.debug(
-              "DIR* FSDirectory.mkdirs: created directory " + cur);
-        }
-      }
-    } finally {
-      writeUnlock();
-    }
-    return true;
-  }
 
   INode unprotectedMkdir(long inodeId, String src, PermissionStatus permissions,
                           List<AclEntry> aclEntries, long timestamp)
@@ -2019,7 +1812,7 @@ public class FSDirectory implements Closeable {
    * The parent path to the directory is at [0, pos-1].
    * All ancestors exist. Newly created one stored at index pos.
    */
-  private void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
+  void unprotectedMkdir(long inodeId, INodesInPath inodesInPath,
       int pos, byte[] name, PermissionStatus permission,
       List<AclEntry> aclEntries, long timestamp)
       throws QuotaExceededException, AclException {
@@ -2331,10 +2124,8 @@ public class FSDirectory implements Closeable {
     }
     return 1;
   }
-  
-  /**
-   */
-  String normalizePath(String src) {
+
+  static String normalizePath(String src) {
     if (src.length() > 1 && src.endsWith("/")) {
       src = src.substring(0, src.length() - 1);
     }
@@ -2419,7 +2210,7 @@ public class FSDirectory implements Closeable {
   /**
    * See {@link ClientProtocol#setQuota(String, long, long)} for the contract.
    * Sets quota for for a directory.
-   * @return INodeDirectory if any of the quotas have changed. null other wise.
+   * @return INodeDirectory if any of the quotas have changed. null otherwise.
    * @throws FileNotFoundException if the path does not exist.
    * @throws PathIsNotDirectoryException if the path is not a directory.
    * @throws QuotaExceededException if the directory tree size is 
@@ -2470,21 +2261,17 @@ public class FSDirectory implements Closeable {
   
   /**
    * See {@link ClientProtocol#setQuota(String, long, long)} for the contract.
+   * @return INodeDirectory if any of the quotas have changed. null otherwise.
    * @throws SnapshotAccessControlException if path is in RO snapshot
    * @see #unprotectedSetQuota(String, long, long)
    */
-  void setQuota(String src, long nsQuota, long dsQuota) 
+  INodeDirectory setQuota(String src, long nsQuota, long dsQuota)
       throws FileNotFoundException, PathIsNotDirectoryException,
       QuotaExceededException, UnresolvedLinkException,
       SnapshotAccessControlException {
     writeLock();
     try {
-      INodeDirectory dir = unprotectedSetQuota(src, nsQuota, dsQuota);
-      if (dir != null) {
-        final Quota.Counts q = dir.getQuotaCounts();
-        fsImage.getEditLog().logSetQuota(src,
-            q.get(Quota.NAMESPACE), q.get(Quota.DISKSPACE));
-      }
+      return unprotectedSetQuota(src, nsQuota, dsQuota);
     } finally {
       writeUnlock();
     }
@@ -2503,18 +2290,14 @@ public class FSDirectory implements Closeable {
   /**
    * Sets the access time on the file/directory. Logs it in the transaction log.
    */
-  void setTimes(String src, INode inode, long mtime, long atime, boolean force,
-      int latestSnapshotId) throws QuotaExceededException {
-    boolean status = false;
+  boolean setTimes(INode inode, long mtime, long atime, boolean force,
+                   int latestSnapshotId) throws QuotaExceededException {
     writeLock();
     try {
-      status = unprotectedSetTimes(inode, mtime, atime, force, latestSnapshotId);
+      return unprotectedSetTimes(inode, mtime, atime, force, latestSnapshotId);
     } finally {
       writeUnlock();
     }
-    if (status) {
-      fsImage.getEditLog().logTimes(src, mtime, atime);
-    }
   }
 
   boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force) 
@@ -2678,49 +2461,21 @@ public class FSDirectory implements Closeable {
     }
     return perm;
   }
-    
+
   /**
-   * Add the given symbolic link to the fs. Record it in the edits log.
+   * Add the specified path into the namespace.
    */
-  INodeSymlink addSymlink(String path, String target,
-      PermissionStatus dirPerms, boolean createParent, boolean logRetryCache)
-      throws UnresolvedLinkException, FileAlreadyExistsException,
-      QuotaExceededException, SnapshotAccessControlException, AclException {
-    waitForReady();
-
-    final long modTime = now();
-    if (createParent) {
-      final String parent = new Path(path).getParent().toString();
-      if (!mkdirs(parent, dirPerms, true, modTime)) {
-        return null;
-      }
-    }
-    final String userName = dirPerms.getUserName();
-    INodeSymlink newNode  = null;
-    long id = namesystem.allocateNewInodeId();
+  INodeSymlink addSymlink(long id, String path, String target,
+                          long mtime, long atime, PermissionStatus perm)
+          throws UnresolvedLinkException, QuotaExceededException {
     writeLock();
     try {
-      newNode = unprotectedAddSymlink(id, path, target, modTime, modTime,
-          new PermissionStatus(userName, null, FsPermission.getDefault()));
+      return unprotectedAddSymlink(id, path, target, mtime, atime, perm);
     } finally {
       writeUnlock();
     }
-    if (newNode == null) {
-      NameNode.stateChangeLog.info("DIR* addSymlink: failed to add " + path);
-      return null;
-    }
-    fsImage.getEditLog().logSymlink(path, target, modTime, modTime, newNode,
-        logRetryCache);
-    
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug("DIR* addSymlink: " + path + " is added");
-    }
-    return newNode;
   }
 
-  /**
-   * Add the specified path into the namespace. Invoked from edit log processing.
-   */
   INodeSymlink unprotectedAddSymlink(long id, String path, String target,
       long mtime, long atime, PermissionStatus perm)
       throws UnresolvedLinkException, QuotaExceededException {
@@ -2730,11 +2485,10 @@ public class FSDirectory implements Closeable {
     return addINode(path, symlink) ? symlink : null;
   }
 
-  void modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
+  List<AclEntry> modifyAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
     writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedModifyAclEntries(src, aclSpec);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedModifyAclEntries(src, aclSpec);
     } finally {
       writeUnlock();
     }
@@ -2753,11 +2507,10 @@ public class FSDirectory implements Closeable {
     return newAcl;
   }
 
-  void removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
+  List<AclEntry> removeAclEntries(String src, List<AclEntry> aclSpec) throws IOException {
     writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedRemoveAclEntries(src, aclSpec);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedRemoveAclEntries(src, aclSpec);
     } finally {
       writeUnlock();
     }
@@ -2776,11 +2529,10 @@ public class FSDirectory implements Closeable {
     return newAcl;
   }
 
-  void removeDefaultAcl(String src) throws IOException {
+  List<AclEntry> removeDefaultAcl(String src) throws IOException {
     writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedRemoveDefaultAcl(src);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedRemoveDefaultAcl(src);
     } finally {
       writeUnlock();
     }
@@ -2803,7 +2555,6 @@ public class FSDirectory implements Closeable {
     writeLock();
     try {
       unprotectedRemoveAcl(src);
-      fsImage.getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
     } finally {
       writeUnlock();
     }
@@ -2817,11 +2568,10 @@ public class FSDirectory implements Closeable {
     AclStorage.removeINodeAcl(inode, snapshotId);
   }
 
-  void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
+  List<AclEntry> setAcl(String src, List<AclEntry> aclSpec) throws IOException {
     writeLock();
     try {
-      List<AclEntry> newAcl = unprotectedSetAcl(src, aclSpec);
-      fsImage.getEditLog().logSetAcl(src, newAcl);
+      return unprotectedSetAcl(src, aclSpec);
     } finally {
       writeUnlock();
     }
@@ -2868,18 +2618,11 @@ public class FSDirectory implements Closeable {
       readUnlock();
     }
   }
-  
-  void removeXAttr(String src, XAttr xAttr) throws IOException {
+
+  XAttr removeXAttr(String src, XAttr xAttr) throws IOException {
     writeLock();
     try {
-      XAttr removedXAttr = unprotectedRemoveXAttr(src, xAttr);
-      if (removedXAttr != null) {
-        fsImage.getEditLog().logRemoveXAttr(src, removedXAttr);
-      } else {
-        NameNode.stateChangeLog.info("DIR* FSDirectory.removeXAttr: XAttr " +
-        		XAttrHelper.getPrefixName(xAttr) + 
-        		" does not exist on the path " + src);
-      }
+      return unprotectedRemoveXAttr(src, xAttr);
     } finally {
       writeUnlock();
     }
@@ -2917,12 +2660,11 @@ public class FSDirectory implements Closeable {
     return xAttrs;
   }
   
-  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag,
-      boolean logRetryCache) throws IOException {
+  void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
+          throws IOException {
     writeLock();
     try {
       unprotectedSetXAttr(src, xAttr, flag);
-      fsImage.getEditLog().logSetXAttr(src, xAttr, logRetryCache);
     } finally {
       writeUnlock();
     }
@@ -2943,6 +2685,7 @@ public class FSDirectory implements Closeable {
       EnumSet<XAttrSetFlag> flag) throws QuotaExceededException, IOException {
     List<XAttr> xAttrs = Lists.newArrayListWithCapacity(
         existingXAttrs != null ? existingXAttrs.size() + 1 : 1);
+    int userVisibleXAttrsNum = 0; // Number of user visible xAttrs
     boolean exist = false;
     if (existingXAttrs != null) {
       for (XAttr a: existingXAttrs) {
@@ -2951,6 +2694,10 @@ public class FSDirectory implements Closeable {
           exist = true;
         } else {
           xAttrs.add(a);
+          
+          if (isUserVisible(a)) {
+            userVisibleXAttrsNum++;
+          }
         }
       }
     }
@@ -2958,7 +2705,11 @@ public class FSDirectory implements Closeable {
     XAttrSetFlag.validate(xAttr.getName(), exist, flag);
     xAttrs.add(xAttr);
     
-    if (xAttrs.size() > inodeXAttrsLimit) {
+    if (isUserVisible(xAttr)) {
+      userVisibleXAttrsNum++;
+    }
+    
+    if (userVisibleXAttrsNum > inodeXAttrsLimit) {
       throw new IOException("Cannot add additional XAttr to inode, "
           + "would exceed limit of " + inodeXAttrsLimit);
     }
@@ -2966,6 +2717,14 @@ public class FSDirectory implements Closeable {
     return xAttrs;
   }
   
+  private boolean isUserVisible(XAttr xAttr) {
+    if (xAttr.getNameSpace() == XAttr.NameSpace.USER || 
+        xAttr.getNameSpace() == XAttr.NameSpace.TRUSTED) {
+      return true;
+    }
+    return false;
+  }
+  
   List<XAttr> getXAttrs(String src) throws IOException {
     String srcs = normalizePath(src);
     readLock();
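
The setXAttr hunk above is HDFS-6395: only xattrs in the user-visible namespaces (USER and TRUSTED) count against the per-inode limit, so system-managed xattrs can no longer crowd out user ones. A small sketch of the counting rule (the namespace enum is abbreviated to the values relevant here):

    import java.util.Arrays;
    import java.util.List;

    public class XAttrLimitSketch {
      enum NameSpace { USER, TRUSTED, SYSTEM, SECURITY }

      /** Matches FSDirectory#isUserVisible above. */
      static boolean isUserVisible(NameSpace ns) {
        return ns == NameSpace.USER || ns == NameSpace.TRUSTED;
      }

      static void checkLimit(List<NameSpace> xattrs, int inodeXAttrsLimit)
          throws Exception {
        int userVisible = 0;
        for (NameSpace ns : xattrs) {
          if (isUserVisible(ns)) {
            userVisible++;
          }
        }
        if (userVisible > inodeXAttrsLimit) {
          throw new Exception("would exceed limit of " + inodeXAttrsLimit);
        }
      }

      public static void main(String[] args) throws Exception {
        // Two SYSTEM xattrs plus one USER xattr pass a limit of 1,
        // because only the USER entry is counted.
        checkLimit(Arrays.asList(NameSpace.SYSTEM, NameSpace.SYSTEM,
            NameSpace.USER), 1);
        System.out.println("ok");
      }
    }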

+ 289 - 42
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -145,7 +145,7 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.StorageType;
-import org.apache.hadoop.hdfs.XAttrHelper;
+import org.apache.hadoop.hdfs.protocol.AclException;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
@@ -167,6 +167,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
+import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -1567,6 +1568,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
       dir.setPermission(src, permission);
+      getEditLog().logSetPermissions(src, permission);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -1612,6 +1614,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         }
       }
       dir.setOwner(src, username, group);
+      getEditLog().logSetOwner(src, username, group);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -1742,7 +1745,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
             if (isReadOp) {
               continue;
             }
-            dir.setTimes(src, inode, -1, now, false, iip.getLatestSnapshotId());
+            boolean changed = dir.setTimes(inode, -1, now, false,
+                    iip.getLatestSnapshotId());
+            if (changed) {
+              getEditLog().logTimes(src, -1, now);
+            }
           }
         }
         final long fileSize = iip.isSnapshot() ?
@@ -1953,7 +1960,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           Arrays.toString(srcs) + " to " + target);
     }
 
-    dir.concat(target,srcs, logRetryCache);
+    long timestamp = now();
+    dir.concat(target, srcs, timestamp);
+    getEditLog().logConcat(target, srcs, timestamp, logRetryCache);
   }
   
   /**
@@ -1994,7 +2003,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       final INodesInPath iip = dir.getINodesInPath4Write(src);
       final INode inode = iip.getLastINode();
       if (inode != null) {
-        dir.setTimes(src, inode, mtime, atime, true, iip.getLatestSnapshotId());
+        boolean changed = dir.setTimes(inode, mtime, atime, true,
+                iip.getLatestSnapshotId());
+        if (changed) {
+          getEditLog().logTimes(src, mtime, atime);
+        }
         resultingStat = getAuditFileInfo(src, false);
       } else {
         throw new FileNotFoundException("File/Directory " + src + " does not exist.");
@@ -2063,7 +2076,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       checkFsObjectLimit();
 
       // add symbolic link to namespace
-      dir.addSymlink(link, target, dirPerms, createParent, logRetryCache);
+      addSymlink(link, target, dirPerms, createParent, logRetryCache);
       resultingStat = getAuditFileInfo(link, false);
     } finally {
       writeUnlock();
@@ -2115,6 +2128,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       final Block[] blocks = dir.setReplication(src, replication, blockRepls);
       isFile = blocks != null;
       if (isFile) {
+        getEditLog().logSetReplication(src, replication);
         blockManager.setReplication(blockRepls[0], blockRepls[1], src, blocks);
       }
     } finally {
@@ -2315,8 +2329,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       final DatanodeDescriptor clientNode = 
           blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
 
-      INodeFile newNode = dir.addFile(src, permissions, replication, blockSize,
-          holder, clientMachine, clientNode);
+      INodeFile newNode = null;
+
+      // Always do an implicit mkdirs for parent directory tree.
+      Path parent = new Path(src).getParent();
+      if (parent != null && mkdirsRecursively(parent.toString(),
+              permissions, true, now())) {
+        newNode = dir.addFile(src, permissions, replication, blockSize,
+                holder, clientMachine, clientNode);
+      }
+
       if (newNode == null) {
         throw new IOException("Unable to add " + src +  " to namespace");
       }
@@ -2740,7 +2762,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       INodesInPath inodesInPath = INodesInPath.fromINode(pendingFile);
       saveAllocatedBlock(src, inodesInPath, newBlock, targets);
 
-      dir.persistNewBlock(src, pendingFile);
+      persistNewBlock(src, pendingFile);
       offset = pendingFile.computeFileSize();
     } finally {
       writeUnlock();
@@ -2960,7 +2982,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: "
                                       + b + " is removed from pendingCreates");
       }
-      dir.persistBlocks(src, file, false);
+      persistBlocks(src, file, false);
     } finally {
       writeUnlock();
     }
@@ -2996,6 +3018,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           + (lease != null ? lease.toString()
               : "Holder " + holder + " does not have any open files."));
     }
+    // No further modification is allowed on a deleted file.
+    // A file is considered deleted if it has no parent or is marked
+    // as deleted in the snapshot feature.
+    if (file.getParent() == null || (file.isWithSnapshot() &&
+        file.getFileWithSnapshotFeature().isCurrentFileDeleted())) {
+      throw new FileNotFoundException(src);
+    }
     String clientName = file.getFileUnderConstructionFeature().getClientName();
     if (holder != null && !clientName.equals(holder)) {
       throw new LeaseExpiredException("Lease mismatch on " + ident +
@@ -3260,7 +3289,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           false, false);
     }
 
-    if (dir.renameTo(src, dst, logRetryCache)) {
+    long mtime = now();
+    if (dir.renameTo(src, dst, mtime)) {
+      getEditLog().logRename(src, dst, mtime, logRetryCache);
       return true;
     }
     return false;
@@ -3325,7 +3356,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           false);
     }
 
-    dir.renameTo(src, dst, logRetryCache, options);
+    long mtime = now();
+    dir.renameTo(src, dst, mtime, options);
+    getEditLog().logRename(src, dst, mtime, logRetryCache, options);
   }
   
   /**
@@ -3408,10 +3441,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         checkPermission(pc, src, false, null, FsAction.WRITE, null,
             FsAction.ALL, true, false);
       }
+      long mtime = now();
       // Unlink the target directory from directory tree
-      if (!dir.delete(src, collectedBlocks, removedINodes, logRetryCache)) {
+      long filesRemoved = dir.delete(src, collectedBlocks, removedINodes,
+              mtime);
+      if (filesRemoved < 0) {
         return false;
       }
+      getEditLog().logDelete(src, mtime, logRetryCache);
+      incrDeletedFileCount(filesRemoved);
+      // Blocks/INodes will be handled later
+      removePathAndBlocks(src, null, null);
       ret = true;
     } finally {
       writeUnlock();
@@ -3419,6 +3459,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     getEditLog().logSync(); 
     removeBlocks(collectedBlocks); // Incremental deletion of blocks
     collectedBlocks.clear();
+
     dir.writeLock();
     try {
       dir.removeFromInodeMap(removedINodes);
@@ -3671,12 +3712,118 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     // create multiple inodes.
     checkFsObjectLimit();
 
-    if (!dir.mkdirs(src, permissions, false, now())) {
+    if (!mkdirsRecursively(src, permissions, false, now())) {
       throw new IOException("Failed to create directory: " + src);
     }
     return true;
   }
 
+  /**
+   * Create a directory.
+   * If ancestor directories do not exist, automatically create them.
+   *
+   * @param src string representation of the path to the directory
+   * @param permissions the permission of the directory
+   * @param inheritPermission if the permission of the directory should inherit
+   *                          from its parent or not. u+wx is implicitly added to
+   *                          the automatically created directories, and to the
+   *                          given directory if inheritPermission is true
+   * @param now creation time
+   * @return true if the operation succeeds; false otherwise
+   * @throws QuotaExceededException if directory creation violates
+   *                                any quota limit
+   * @throws UnresolvedLinkException if a symlink is encountered in src
+   * @throws SnapshotAccessControlException if the path is in a read-only snapshot
+   */
+  private boolean mkdirsRecursively(String src, PermissionStatus permissions,
+                 boolean inheritPermission, long now)
+          throws FileAlreadyExistsException, QuotaExceededException,
+                 UnresolvedLinkException, SnapshotAccessControlException,
+                 AclException {
+    src = FSDirectory.normalizePath(src);
+    String[] names = INode.getPathNames(src);
+    byte[][] components = INode.getPathComponents(names);
+    final int lastInodeIndex = components.length - 1;
+
+    dir.writeLock();
+    try {
+      INodesInPath iip = dir.getExistingPathINodes(components);
+      if (iip.isSnapshot()) {
+        throw new SnapshotAccessControlException(
+                "Modification on RO snapshot is disallowed");
+      }
+      INode[] inodes = iip.getINodes();
+
+      // find the index of the first null in inodes[]
+      StringBuilder pathbuilder = new StringBuilder();
+      int i = 1;
+      for(; i < inodes.length && inodes[i] != null; i++) {
+        pathbuilder.append(Path.SEPARATOR).append(names[i]);
+        if (!inodes[i].isDirectory()) {
+          throw new FileAlreadyExistsException(
+                  "Parent path is not a directory: "
+                  + pathbuilder + " " + inodes[i].getLocalName());
+        }
+      }
+
+      // default to creating parent dirs with the given perms
+      PermissionStatus parentPermissions = permissions;
+
+      // if not inheriting and it's the last inode, there's no use in
+      // computing perms that won't be used
+      if (inheritPermission || (i < lastInodeIndex)) {
+        // if inheriting (i.e. creating a file or symlink), use the parent dir,
+        // else the supplied permissions
+        // NOTE: the permissions of the auto-created directories violate POSIX
+        FsPermission parentFsPerm = inheritPermission
+                ? inodes[i-1].getFsPermission() : permissions.getPermission();
+
+        // ensure that the permissions allow user write+execute
+        if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
+          parentFsPerm = new FsPermission(
+                  parentFsPerm.getUserAction().or(FsAction.WRITE_EXECUTE),
+                  parentFsPerm.getGroupAction(),
+                  parentFsPerm.getOtherAction()
+          );
+        }
+
+        if (!parentPermissions.getPermission().equals(parentFsPerm)) {
+          parentPermissions = new PermissionStatus(
+                  parentPermissions.getUserName(),
+                  parentPermissions.getGroupName(),
+                  parentFsPerm
+          );
+          // when inheriting, use same perms for entire path
+          if (inheritPermission) permissions = parentPermissions;
+        }
+      }
+
+      // create directories beginning from the first null index
+      for(; i < inodes.length; i++) {
+        pathbuilder.append(Path.SEPARATOR).append(names[i]);
+        dir.unprotectedMkdir(allocateNewInodeId(), iip, i, components[i],
+                (i < lastInodeIndex) ? parentPermissions : permissions, null,
+                now);
+        if (inodes[i] == null) {
+          return false;
+        }
+        // Directory creation also counts towards FilesCreated
+        // to match the count of the FilesDeleted metric.
+        NameNode.getNameNodeMetrics().incrFilesCreated();
+
+        final String cur = pathbuilder.toString();
+        getEditLog().logMkDir(cur, inodes[i]);
+        if(NameNode.stateChangeLog.isDebugEnabled()) {
+          NameNode.stateChangeLog.debug(
+                  "mkdirs: created directory " + cur);
+        }
+      }
+    } finally {
+      dir.writeUnlock();
+    }
+    return true;
+  }
+
   /**
    * Get the content summary for a specific file/dir.
    *
@@ -3721,7 +3868,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * 
    * Note: This does not support ".inodes" relative path.
    */
-  void setQuota(String path, long nsQuota, long dsQuota) 
+  void setQuota(String path, long nsQuota, long dsQuota)
       throws IOException, UnresolvedLinkException {
     checkSuperuserPrivilege();
     checkOperation(OperationCategory.WRITE);
@@ -3729,7 +3876,12 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       checkOperation(OperationCategory.WRITE);
       checkNameNodeSafeMode("Cannot set quota on " + path);
-      dir.setQuota(path, nsQuota, dsQuota);
+      INodeDirectory changed = dir.setQuota(path, nsQuota, dsQuota);
+      if (changed != null) {
+        final Quota.Counts q = changed.getQuotaCounts();
+        getEditLog().logSetQuota(path,
+                q.get(Quota.NAMESPACE), q.get(Quota.DISKSPACE));
+      }
     } finally {
       writeUnlock();
     }
@@ -3770,7 +3922,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         pendingFile.getFileUnderConstructionFeature().updateLengthOfLastBlock(
             pendingFile, lastBlockLength);
       }
-      dir.persistBlocks(src, pendingFile, false);
+      persistBlocks(src, pendingFile, false);
     } finally {
       writeUnlock();
     }
@@ -3963,7 +4115,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     final INodeFile newFile = pendingFile.toCompleteFile(now());
 
     // close file and persist block allocations for this file
-    dir.closeFile(src, newFile);
+    closeFile(src, newFile);
 
     blockManager.checkReplication(newFile);
   }
@@ -4114,7 +4266,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         src = closeFileCommitBlocks(iFile, storedBlock);
       } else {
         // If this commit does not want to close the file, persist blocks
-        src = persistBlocks(iFile, false);
+        src = iFile.getFullPathName();
+        persistBlocks(src, iFile, false);
       }
     } finally {
       writeUnlock();
@@ -4152,21 +4305,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return src;
   }
 
-  /**
-   * Persist the block list for the given file.
-   *
-   * @param pendingFile
-   * @return Path to the given file.
-   * @throws IOException
-   */
-  @VisibleForTesting
-  String persistBlocks(INodeFile pendingFile, boolean logRetryCache)
-      throws IOException {
-    String src = pendingFile.getFullPathName();
-    dir.persistBlocks(src, pendingFile, logRetryCache);
-    return src;
-  }
-
   /**
    * Renew the lease(s) held by the given client
    */
@@ -4350,6 +4488,85 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     hasResourcesAvailable = nnResourceChecker.hasAvailableDiskSpace();
   }
 
+  /**
+   * Persist the block list for the inode.
+   * @param path path of the file
+   * @param file the file's inode, which must be under construction
+   * @param logRetryCache whether to record the rpc in the retry cache
+   */
+  private void persistBlocks(String path, INodeFile file,
+                             boolean logRetryCache) {
+    assert hasWriteLock();
+    Preconditions.checkArgument(file.isUnderConstruction());
+    getEditLog().logUpdateBlocks(path, file, logRetryCache);
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("persistBlocks: " + path
+              + " with " + file.getBlocks().length + " blocks is persisted to" +
+              " the file system");
+    }
+  }
+
+  void incrDeletedFileCount(long count) {
+    NameNode.getNameNodeMetrics().incrFilesDeleted(count);
+  }
+
+  /**
+   * Close file.
+   * @param path path of the file
+   * @param file the file's inode
+   */
+  private void closeFile(String path, INodeFile file) {
+    assert hasWriteLock();
+    dir.waitForReady();
+    // file is closed
+    getEditLog().logCloseFile(path, file);
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("closeFile: "
+              +path+" with "+ file.getBlocks().length
+              +" blocks is persisted to the file system");
+    }
+  }
+
+  /**
+   * Add the given symbolic link to the fs. Record it in the edits log.
+   * @param path path of the symlink
+   * @param target the symlink's target
+   * @param dirPerms permissions to use when creating parent directories
+   * @param createParent whether missing parent directories should be created
+   * @param logRetryCache whether to record the rpc in the retry cache
+   */
+  private INodeSymlink addSymlink(String path, String target,
+                                  PermissionStatus dirPerms,
+                                  boolean createParent, boolean logRetryCache)
+      throws UnresolvedLinkException, FileAlreadyExistsException,
+      QuotaExceededException, SnapshotAccessControlException, AclException {
+    dir.waitForReady();
+
+    final long modTime = now();
+    if (createParent) {
+      final String parent = new Path(path).getParent().toString();
+      if (!mkdirsRecursively(parent, dirPerms, true, modTime)) {
+        return null;
+      }
+    }
+    final String userName = dirPerms.getUserName();
+    long id = allocateNewInodeId();
+    INodeSymlink newNode = dir.addSymlink(id, path, target, modTime, modTime,
+            new PermissionStatus(userName, null, FsPermission.getDefault()));
+    if (newNode == null) {
+      NameNode.stateChangeLog.info("addSymlink: failed to add " + path);
+      return null;
+    }
+    getEditLog().logSymlink(path, target, modTime, modTime, newNode,
+        logRetryCache);
+
+    if(NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("addSymlink: " + path + " is added");
+    }
+    return newNode;
+  }
+
   /**
    * Periodically calls hasAvailableResources of NameNodeResourceChecker, and if
    * there are found to be insufficient resources available, causes the NN to
@@ -4682,6 +4899,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
   }
 
+  /**
+   * Persist the new block (the last block of the given file).
+   * @param path path of the file
+   * @param file the file's inode, which must be under construction
+   */
+  private void persistNewBlock(String path, INodeFile file) {
+    Preconditions.checkArgument(file.isUnderConstruction());
+    getEditLog().logAddBlock(path, file);
+    if (NameNode.stateChangeLog.isDebugEnabled()) {
+      NameNode.stateChangeLog.debug("persistNewBlock: "
+              + path + " with new block " + file.getLastBlock().toString()
+              + ", current total block count is " + file.getBlocks().length);
+    }
+  }
+
   /**
    * SafeModeInfo contains information related to the safe mode.
    * <p>
@@ -6090,7 +6322,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     blockinfo.setExpectedLocations(storages);
 
     String src = pendingFile.getFullPathName();
-    dir.persistBlocks(src, pendingFile, logRetryCache);
+    persistBlocks(src, pendingFile, logRetryCache);
   }
 
   // rename was successful. If any part of the renamed subtree had
@@ -7415,14 +7647,20 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
       returnInfo = finalizeRollingUpgradeInternal(now());
       getEditLog().logFinalizeRollingUpgrade(returnInfo.getFinalizeTime());
-      getFSImage().saveNamespace(this);
+      if (haEnabled) {
+        // roll the edit log to make sure the standby NameNode can tail
+        getFSImage().rollEditLog();
+      }
       getFSImage().renameCheckpoint(NameNodeFile.IMAGE_ROLLBACK,
           NameNodeFile.IMAGE);
     } finally {
       writeUnlock();
     }
 
-    // getEditLog().logSync() is not needed since it does saveNamespace 
+    if (!haEnabled) {
+      // Sync not needed for HA since the edit log was rolled after logging.
+      getEditLog().logSync();
+    }
 
     if (auditLog.isInfoEnabled() && isExternalInvocation()) {
       logAuditEvent(true, "finalizeRollingUpgrade", null, null, null);
@@ -7718,7 +7956,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       checkNameNodeSafeMode("Cannot modify ACL entries on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.modifyAclEntries(src, aclSpec);
+      List<AclEntry> newAcl = dir.modifyAclEntries(src, aclSpec);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7739,7 +7978,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       checkNameNodeSafeMode("Cannot remove ACL entries on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.removeAclEntries(src, aclSpec);
+      List<AclEntry> newAcl = dir.removeAclEntries(src, aclSpec);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7760,7 +8000,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       checkNameNodeSafeMode("Cannot remove default ACL entries on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.removeDefaultAcl(src);
+      List<AclEntry> newAcl = dir.removeDefaultAcl(src);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7782,6 +8023,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
       dir.removeAcl(src);
+      getEditLog().logSetAcl(src, AclFeature.EMPTY_ENTRY_LIST);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7802,7 +8044,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       checkNameNodeSafeMode("Cannot set ACL on " + src);
       src = FSDirectory.resolvePath(src, pathComponents, dir);
       checkOwner(pc, src);
-      dir.setAcl(src, aclSpec);
+      List<AclEntry> newAcl = dir.setAcl(src, aclSpec);
+      getEditLog().logSetAcl(src, newAcl);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7878,7 +8121,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         checkOwner(pc, src);
         checkPathAccess(pc, src, FsAction.WRITE);
       }
-      dir.setXAttr(src, xAttr, flag, logRetryCache);
+      dir.setXAttr(src, xAttr, flag);
+      getEditLog().logSetXAttr(src, xAttr, logRetryCache);
       resultingStat = getAuditFileInfo(src, false);
     } finally {
       writeUnlock();
@@ -7999,7 +8243,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         checkPathAccess(pc, src, FsAction.WRITE);
       }
       
-      dir.removeXAttr(src, xAttr);
+      XAttr removedXAttr = dir.removeXAttr(src, xAttr);
+      if (removedXAttr != null) {
+        getEditLog().logRemoveXAttr(src, removedXAttr);
+      }
       resultingStat = getAuditFileInfo(src, false);
     } catch (AccessControlException e) {
       logAuditEvent(false, "removeXAttr", src);

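The common thread in the FSNamesystem hunks above is that edit-log writes move up out of FSDirectory: the directory layer now only mutates the in-memory tree, and FSNamesystem records the matching op (logSetPermissions, logRename, logSetAcl, and so on) immediately after a successful mutation while still holding the namesystem write lock, then syncs once the lock is released. A schematic sketch of that log-after-apply pattern, using illustrative stand-ins rather than the Hadoop classes:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LogAfterApplySketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final List<String> journal = new ArrayList<String>();   // stands in for FSEditLog
  private final List<String> namespace = new ArrayList<String>(); // stands in for FSDirectory

  void mkdir(String path) {
    lock.writeLock().lock();
    try {
      namespace.add(path);             // 1. apply to the in-memory tree
      journal.add("OP_MKDIR " + path); // 2. record the op under the same lock
    } finally {
      lock.writeLock().unlock();
    }
    sync();                            // 3. flush durably outside the lock
  }

  private void sync() {
    // In HDFS this is getEditLog().logSync(); here it is a placeholder.
  }
}

Recording the op under the lock keeps journal order identical to the in-memory mutation order; syncing after release keeps the lock hold time short.
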
+ 17 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java

@@ -43,6 +43,8 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
+import org.apache.hadoop.io.nativeio.NativeIO;
+
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
@@ -132,10 +134,14 @@ public class FileJournalManager implements JournalManager {
     Preconditions.checkState(!dstFile.exists(),
         "Can't finalize edits file " + inprogressFile + " since finalized file " +
         "already exists");
-    if (!inprogressFile.renameTo(dstFile)) {
+
+    try {
+      NativeIO.renameTo(inprogressFile, dstFile);
+    } catch (IOException e) {
       errorReporter.reportErrorOnFile(dstFile);
-      throw new IllegalStateException("Unable to finalize edits file " + inprogressFile);
+      throw new IllegalStateException("Unable to finalize edits file " + inprogressFile, e);
     }
+
     if (inprogressFile.equals(currentInProgress)) {
       currentInProgress = null;
     }
@@ -513,11 +519,16 @@ public class FileJournalManager implements JournalManager {
       File src = file;
       File dst = new File(src.getParent(), src.getName() + newSuffix);
       // renameTo fails on Windows if the destination file already exists.
-      if (!src.renameTo(dst)) {
-        if (!dst.delete() || !src.renameTo(dst)) {
-          throw new IOException(
-            "Couldn't rename log " + src + " to " + dst);
+      try {
+        if (dst.exists()) {
+          if (!dst.delete()) {
+            throw new IOException("Couldn't delete " + dst);
+          }
         }
+        NativeIO.renameTo(src, dst);
+      } catch (IOException e) {
+        throw new IOException(
+            "Couldn't rename log " + src + " to " + dst, e);
       }
       file = dst;
     }

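The FileJournalManager change swaps File.renameTo(), which only reports a boolean, for NativeIO.renameTo(), which throws an IOException carrying the underlying OS error, and deletes the destination first because rename-over-existing fails on Windows. The same replace-then-rename-with-cause idea can be sketched with the standard java.nio API; this illustrates the pattern, not the Hadoop NativeIO call:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class RenameSketch {
  // Rename src to dst, replacing dst if present, and surface the real cause.
  static void renameWithCause(Path src, Path dst) throws IOException {
    try {
      // REPLACE_EXISTING covers the Windows rename-over-existing failure mode.
      Files.move(src, dst, StandardCopyOption.REPLACE_EXISTING);
    } catch (IOException e) {
      // Chain the underlying error instead of collapsing it to a boolean.
      throw new IOException("Couldn't rename " + src + " to " + dst, e);
    }
  }
}
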
+ 20 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -132,6 +132,8 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.WritableRpcEngine;
+import org.apache.hadoop.ipc.RefreshRegistry;
+import org.apache.hadoop.ipc.RefreshResponse;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Groups;
@@ -147,6 +149,9 @@ import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolServerSi
 import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolPB;
 import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolServerSideTranslatorPB;
 import org.apache.hadoop.ipc.proto.RefreshCallQueueProtocolProtos.RefreshCallQueueProtocolService;
+import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
+import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolServerSideTranslatorPB;
+import org.apache.hadoop.ipc.proto.GenericRefreshProtocolProtos.GenericRefreshProtocolService;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.tools.proto.GetUserMappingsProtocolProtos.GetUserMappingsProtocolService;
@@ -229,6 +234,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
     BlockingService refreshCallQueueService = RefreshCallQueueProtocolService
         .newReflectiveBlockingService(refreshCallQueueXlator);
 
+    GenericRefreshProtocolServerSideTranslatorPB genericRefreshXlator =
+        new GenericRefreshProtocolServerSideTranslatorPB(this);
+    BlockingService genericRefreshService = GenericRefreshProtocolService
+        .newReflectiveBlockingService(genericRefreshXlator);
+
     GetUserMappingsProtocolServerSideTranslatorPB getUserMappingXlator = 
         new GetUserMappingsProtocolServerSideTranslatorPB(this);
     BlockingService getUserMappingService = GetUserMappingsProtocolService
@@ -278,6 +288,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
       // We support Refreshing call queue here in case the client RPC queue is full
       DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class,
           refreshCallQueueService, serviceRpcServer);
+      DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class,
+          genericRefreshService, serviceRpcServer);
       DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, 
           getUserMappingService, serviceRpcServer);
   
@@ -322,6 +334,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
         refreshUserMappingService, clientRpcServer);
     DFSUtil.addPBProtocol(conf, RefreshCallQueueProtocolPB.class,
         refreshCallQueueService, clientRpcServer);
+    DFSUtil.addPBProtocol(conf, GenericRefreshProtocolPB.class,
+        genericRefreshService, clientRpcServer);
     DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, 
         getUserMappingService, clientRpcServer);
 
@@ -1154,6 +1168,12 @@ class NameNodeRpcServer implements NamenodeProtocols {
       serviceRpcServer.refreshCallQueue(conf);
     }
   }
+
+  @Override // GenericRefreshProtocol
+  public Collection<RefreshResponse> refresh(String identifier, String[] args) {
+    // Let the registry handle it as needed
+    return RefreshRegistry.defaultRegistry().dispatch(identifier, args);
+  }
   
   @Override // GetUserMappingsProtocol
   public String[] getGroupsForUser(String user) throws IOException {

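NameNodeRpcServer registers the new GenericRefreshProtocol on both the service and client RPC servers, and its refresh() simply forwards to RefreshRegistry.defaultRegistry().dispatch(identifier, args). The registry idea, where several handlers may share one identifier and each one receives the call, can be sketched with simplified stand-ins for the Hadoop ipc types:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RefreshRegistrySketch {
  interface Handler {
    int handleRefresh(String identifier, String[] args); // returns an exit code
  }

  private final Map<String, List<Handler>> handlers =
      new HashMap<String, List<Handler>>();

  public synchronized void register(String identifier, Handler h) {
    List<Handler> list = handlers.get(identifier);
    if (list == null) {
      list = new ArrayList<Handler>();
      handlers.put(identifier, list);
    }
    list.add(h);
  }

  // Dispatch to every handler registered under the identifier and
  // collect each handler's return code.
  public synchronized List<Integer> dispatch(String identifier, String[] args) {
    List<Integer> responses = new ArrayList<Integer>();
    List<Handler> list = handlers.get(identifier);
    if (list != null) {
      for (Handler h : list) {
        responses.add(h.handleRefresh(identifier, args));
      }
    }
    return responses;
  }
}
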
+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java

@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.GenericRefreshProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
 
 /** The full set of RPC methods implemented by the Namenode.  */
@@ -35,6 +36,7 @@ public interface NamenodeProtocols
           RefreshAuthorizationPolicyProtocol,
           RefreshUserMappingsProtocol,
           RefreshCallQueueProtocol,
+          GenericRefreshProtocol,
           GetUserMappingsProtocol,
           HAServiceProtocol {
 }

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java

@@ -40,7 +40,8 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolStats;
-import org.apache.hadoop.hdfs.tools.TableListing.Justification;
+import org.apache.hadoop.tools.TableListing;
+import org.apache.hadoop.tools.TableListing.Justification;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 

+ 78 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -26,6 +26,7 @@ import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
@@ -62,12 +63,17 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.TransferFsImage;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.GenericRefreshProtocol;
+import org.apache.hadoop.ipc.RefreshResponse;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.RefreshUserMappingsProtocol;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
-import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
+import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ipc.protocolPB.GenericRefreshProtocolPB;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ToolRunner;
 
@@ -575,7 +581,7 @@ public class DFSAdmin extends FsShell {
    * @exception IOException 
    * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#restoreFailedStorage(String arg)
    */
-  public int restoreFaileStorage(String arg) throws IOException {
+  public int restoreFailedStorage(String arg) throws IOException {
     int exitCode = -1;
 
     if(!arg.equals("check") && !arg.equals("true") && !arg.equals("false")) {
@@ -688,6 +694,7 @@ public class DFSAdmin extends FsShell {
       "\t[-refreshUserToGroupsMappings]\n" +
       "\t[-refreshSuperUserGroupsConfiguration]\n" +
       "\t[-refreshCallQueue]\n" +
+      "\t[-refresh <host:ipc_port> <key> [arg1..argn]\n" +
       "\t[-printTopology]\n" +
       "\t[-refreshNamenodes datanodehost:port]\n"+
       "\t[-deleteBlockPool datanodehost:port blockpoolId [force]]\n"+
@@ -764,6 +771,10 @@ public class DFSAdmin extends FsShell {
 
     String refreshCallQueue = "-refreshCallQueue: Reload the call queue from config\n";
 
+    String genericRefresh = "-refresh: Arguments are <hostname:port> <resource_identifier> [arg1..argn]\n" +
+      "\tTriggers a runtime-refresh of the resource specified by <resource_identifier>\n" +
+      "\ton <hostname:port>. All other args after are sent to the host.";
+
     String printTopology = "-printTopology: Print a tree of the racks and their\n" +
                            "\t\tnodes as reported by the Namenode\n";
     
@@ -848,6 +859,8 @@ public class DFSAdmin extends FsShell {
       System.out.println(refreshSuperUserGroupsConfiguration);
     } else if ("refreshCallQueue".equals(cmd)) {
       System.out.println(refreshCallQueue);
+    } else if ("refresh".equals(cmd)) {
+      System.out.println(genericRefresh);
     } else if ("printTopology".equals(cmd)) {
       System.out.println(printTopology);
     } else if ("refreshNamenodes".equals(cmd)) {
@@ -887,6 +900,7 @@ public class DFSAdmin extends FsShell {
       System.out.println(refreshUserToGroupsMappings);
       System.out.println(refreshSuperUserGroupsConfiguration);
       System.out.println(refreshCallQueue);
+      System.out.println(genericRefresh);
       System.out.println(printTopology);
       System.out.println(refreshNamenodes);
       System.out.println(deleteBlockPool);
@@ -1100,6 +1114,56 @@ public class DFSAdmin extends FsShell {
     return 0;
   }
 
+  public int genericRefresh(String[] argv, int i) throws IOException {
+    String hostport = argv[i++];
+    String identifier = argv[i++];
+    String[] args = Arrays.copyOfRange(argv, i, argv.length);
+
+    // Get the current configuration
+    Configuration conf = getConf();
+
+    // For security authorization, the server principal
+    // for this call should be the NN's.
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
+      conf.get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY, ""));
+
+    // Create the client
+    Class<?> xface = GenericRefreshProtocolPB.class;
+    InetSocketAddress address = NetUtils.createSocketAddr(hostport);
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
+    RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
+    GenericRefreshProtocolPB proxy = (GenericRefreshProtocolPB)
+      RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
+        ugi, conf, NetUtils.getDefaultSocketFactory(conf), 0);
+
+    GenericRefreshProtocol xlator =
+      new GenericRefreshProtocolClientSideTranslatorPB(proxy);
+
+    // Refresh
+    Collection<RefreshResponse> responses = xlator.refresh(identifier, args);
+
+    int returnCode = 0;
+
+    // Print refresh responses
+    System.out.println("Refresh Responses:\n");
+    for (RefreshResponse response : responses) {
+      System.out.println(response.toString());
+
+      if (returnCode == 0 && response.getReturnCode() != 0) {
+        // This is the first non-zero return code, so we should return this
+        returnCode = response.getReturnCode();
+      } else if (returnCode != 0 && response.getReturnCode() != 0) {
+        // Now we have multiple non-zero return codes,
+        // so we merge them into -1
+        returnCode = -1;
+      }
+    }
+
+    return returnCode;
+  }
+
   /**
    * Displays format of commands.
    * @param cmd The command that is being executed.
@@ -1162,6 +1226,9 @@ public class DFSAdmin extends FsShell {
     } else if ("-refreshCallQueue".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-refreshCallQueue]");
+    } else if ("-refresh".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-refresh <hostname:port> <resource_identifier> [arg1..argn]");
     } else if ("-printTopology".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-printTopology]");
@@ -1195,6 +1262,7 @@ public class DFSAdmin extends FsShell {
       System.err.println("           [-refreshUserToGroupsMappings]");
       System.err.println("           [-refreshSuperUserGroupsConfiguration]");
       System.err.println("           [-refreshCallQueue]");
+      System.err.println("           [-refresh]");
       System.err.println("           [-printTopology]");
       System.err.println("           [-refreshNamenodes datanodehost:port]");
       System.err.println("           [-deleteBlockPool datanode-host:port blockpoolId [force]]");
@@ -1292,6 +1360,11 @@ public class DFSAdmin extends FsShell {
         printUsage(cmd);
         return exitCode;
       }
+    } else if ("-refresh".equals(cmd)) {
+      if (argv.length < 3) {
+        printUsage(cmd);
+        return exitCode;
+      }
     } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
       if (argv.length != 1) {
         printUsage(cmd);
@@ -1362,7 +1435,7 @@ public class DFSAdmin extends FsShell {
       } else if ("-rollEdits".equals(cmd)) {
         exitCode = rollEdits();
       } else if ("-restoreFailedStorage".equals(cmd)) {
-        exitCode = restoreFaileStorage(argv[i]);
+        exitCode = restoreFailedStorage(argv[i]);
       } else if ("-refreshNodes".equals(cmd)) {
         exitCode = refreshNodes();
       } else if ("-finalizeUpgrade".equals(cmd)) {
@@ -1387,6 +1460,8 @@ public class DFSAdmin extends FsShell {
         exitCode = refreshSuperUserGroupsConfiguration();
       } else if ("-refreshCallQueue".equals(cmd)) {
         exitCode = refreshCallQueue();
+      } else if ("-refresh".equals(cmd)) {
+        exitCode = genericRefresh(argv, i);
       } else if ("-printTopology".equals(cmd)) {
         exitCode = printTopology();
       } else if ("-refreshNamenodes".equals(cmd)) {

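genericRefresh merges per-handler return codes with a simple rule: zero means success, the first non-zero code is kept, and any further non-zero code collapses the result to -1. The rule in isolation, with a small usage check; this is a sketch of the logic, not the DFSAdmin code itself:

public class MergeReturnCodes {
  // Same merging rule as genericRefresh above: zero stays zero, a single
  // non-zero code is passed through, and two or more failures merge to -1.
  static int merge(int[] codes) {
    int result = 0;
    for (int code : codes) {
      if (result == 0 && code != 0) {
        result = code;        // first failure: keep its code
      } else if (result != 0 && code != 0) {
        result = -1;          // multiple failures: merge to -1
      }
    }
    return result;
  }

  public static void main(String[] args) {
    System.out.println(merge(new int[]{0, 0}));   // 0
    System.out.println(merge(new int[]{0, 23}));  // 23
    System.out.println(merge(new int[]{23, 10})); // -1
  }
}
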
+ 227 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/TestGenericRefresh.java

@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.ipc.RefreshHandler;
+
+import org.apache.hadoop.ipc.RefreshRegistry;
+import org.apache.hadoop.ipc.RefreshResponse;
+import org.junit.Test;
+import org.junit.Before;
+import org.junit.After;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import org.mockito.Mockito;
+
+/**
+ * Before all tests, a MiniDFSCluster is spun up.
+ * Before each test, mock refresh handlers are created and registered.
+ * After each test, the mock handlers are unregistered.
+ * After all tests, the cluster is spun down.
+ */
+public class TestGenericRefresh {
+  private static MiniDFSCluster cluster;
+  private static Configuration config;
+  private static final int NNPort = 54222;
+
+  private static RefreshHandler firstHandler;
+  private static RefreshHandler secondHandler;
+
+  @BeforeClass
+  public static void setUpBeforeClass() throws Exception {
+    config = new Configuration();
+    config.set("hadoop.security.authorization", "true");
+
+    FileSystem.setDefaultUri(config, "hdfs://localhost:" + NNPort);
+    cluster = new MiniDFSCluster.Builder(config).nameNodePort(NNPort).build();
+    cluster.waitActive();
+  }
+
+  @AfterClass
+  public static void tearDownAfterClass() throws Exception {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // Register handlers; the first one just sends an OK response
+    firstHandler = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(firstHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+      .toReturn(RefreshResponse.successResponse());
+    RefreshRegistry.defaultRegistry().register("firstHandler", firstHandler);
+
+    // Second handler has conditional response for testing args
+    secondHandler = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one", "two"}))
+      .toReturn(new RefreshResponse(3, "three"));
+    Mockito.stub(secondHandler.handleRefresh("secondHandler", new String[]{"one"}))
+      .toReturn(new RefreshResponse(2, "two"));
+    RefreshRegistry.defaultRegistry().register("secondHandler", secondHandler);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
+    RefreshRegistry.defaultRegistry().unregisterAll("secondHandler");
+  }
+
+  @Test
+  public void testInvalidCommand() throws Exception {
+    DFSAdmin admin = new DFSAdmin(config);
+    String [] args = new String[]{"-refresh", "nn"};
+    int exitCode = admin.run(args);
+    assertEquals("DFSAdmin should fail due to bad args", -1, exitCode);
+  }
+
+  @Test
+  public void testInvalidIdentifier() throws Exception {
+    DFSAdmin admin = new DFSAdmin(config);
+    String [] args = new String[]{"-refresh", "localhost:" + NNPort, "unregisteredIdentity"};
+    int exitCode = admin.run(args);
+    assertEquals("DFSAdmin should fail due to no handler registered", -1, exitCode);
+  }
+
+  @Test
+  public void testValidIdentifier() throws Exception {
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    int exitCode = admin.run(args);
+    assertEquals("DFSAdmin should succeed", 0, exitCode);
+
+    Mockito.verify(firstHandler).handleRefresh("firstHandler", new String[]{});
+    // Second handler was never called
+    Mockito.verify(secondHandler, Mockito.never())
+      .handleRefresh(Mockito.anyString(), Mockito.any(String[].class));
+  }
+
+  @Test
+  public void testVariableArgs() throws Exception {
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one"};
+    int exitCode = admin.run(args);
+    assertEquals("DFSAdmin should return 2", 2, exitCode);
+
+    exitCode = admin.run(new String[]{"-refresh", "localhost:" + NNPort, "secondHandler", "one", "two"});
+    assertEquals("DFSAdmin should now return 3", 3, exitCode);
+
+    Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one"});
+    Mockito.verify(secondHandler).handleRefresh("secondHandler", new String[]{"one", "two"});
+  }
+
+  @Test
+  public void testUnregistration() throws Exception {
+    RefreshRegistry.defaultRegistry().unregisterAll("firstHandler");
+
+    // And now this should fail
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "firstHandler"};
+    int exitCode = admin.run(args);
+    assertEquals("DFSAdmin should return -1", -1, exitCode);
+  }
+
+  @Test
+  public void testUnregistrationReturnValue() {
+    RefreshHandler mockHandler = Mockito.mock(RefreshHandler.class);
+    RefreshRegistry.defaultRegistry().register("test", mockHandler);
+    boolean ret = RefreshRegistry.defaultRegistry().unregister("test", mockHandler);
+    assertTrue(ret);
+  }
+
+  @Test
+  public void testMultipleRegistration() throws Exception {
+    RefreshRegistry.defaultRegistry().register("sharedId", firstHandler);
+    RefreshRegistry.defaultRegistry().register("sharedId", secondHandler);
+
+    // this should trigger both
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "sharedId", "one"};
+    int exitCode = admin.run(args);
+    assertEquals(-1, exitCode); // -1 because one of the responses is unregistered
+
+    // verify we called both
+    Mockito.verify(firstHandler).handleRefresh("sharedId", new String[]{"one"});
+    Mockito.verify(secondHandler).handleRefresh("sharedId", new String[]{"one"});
+
+    RefreshRegistry.defaultRegistry().unregisterAll("sharedId");
+  }
+
+  @Test
+  public void testMultipleReturnCodeMerging() throws Exception {
+    // Two handlers which return two non-zero values
+    RefreshHandler handlerOne = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(handlerOne.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+      .toReturn(new RefreshResponse(23, "Twenty Three"));
+
+    RefreshHandler handlerTwo = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(handlerTwo.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+      .toReturn(new RefreshResponse(10, "Ten"));
+
+    // Then registered to the same ID
+    RefreshRegistry.defaultRegistry().register("shared", handlerOne);
+    RefreshRegistry.defaultRegistry().register("shared", handlerTwo);
+
+    // We refresh both
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "shared"};
+    int exitCode = admin.run(args);
+    assertEquals(-1, exitCode); // We get -1 because of our logic for merging non-zero return codes
+
+    // Verify we called both
+    Mockito.verify(handlerOne).handleRefresh("shared", new String[]{});
+    Mockito.verify(handlerTwo).handleRefresh("shared", new String[]{});
+
+    RefreshRegistry.defaultRegistry().unregisterAll("shared");
+  }
+
+  @Test
+  public void testExceptionResultsInNormalError() throws Exception {
+    // In this test, we ensure that all handlers are called even if we throw an exception in one
+    RefreshHandler exceptionalHandler = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(exceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+      .toThrow(new RuntimeException("Exceptional Handler Throws Exception"));
+
+    RefreshHandler otherExceptionalHandler = Mockito.mock(RefreshHandler.class);
+    Mockito.stub(otherExceptionalHandler.handleRefresh(Mockito.anyString(), Mockito.any(String[].class)))
+      .toThrow(new RuntimeException("More Exceptions"));
+
+    RefreshRegistry.defaultRegistry().register("exceptional", exceptionalHandler);
+    RefreshRegistry.defaultRegistry().register("exceptional", otherExceptionalHandler);
+
+    DFSAdmin admin = new DFSAdmin(config);
+    String[] args = new String[]{"-refresh", "localhost:" + NNPort, "exceptional"};
+    int exitCode = admin.run(args);
+    assertEquals(-1, exitCode); // Exceptions result in a -1
+
+    Mockito.verify(exceptionalHandler).handleRefresh("exceptional", new String[]{});
+    Mockito.verify(otherExceptionalHandler).handleRefresh("exceptional", new String[]{});
+
+    RefreshRegistry.defaultRegistry().unregisterAll("exceptional");
+  }
+}

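The tests above drive the registry entirely through Mockito mocks; a real handler would implement the same single-method interface the mocks stub. A hypothetical concrete handler follows, using only the RefreshHandler/RefreshResponse signatures visible in the test; the "blacklist" identifier and its reload logic are invented for illustration:

import org.apache.hadoop.ipc.RefreshHandler;
import org.apache.hadoop.ipc.RefreshRegistry;
import org.apache.hadoop.ipc.RefreshResponse;

// A hypothetical concrete handler, registered the same way the tests
// register their mocks.
public class BlacklistRefreshHandler implements RefreshHandler {
  @Override
  public RefreshResponse handleRefresh(String identifier, String[] args) {
    if (args.length == 0) {
      return new RefreshResponse(1, "expected at least one argument");
    }
    // ... reload whatever state this identifier refers to ...
    return RefreshResponse.successResponse();
  }

  public static void register() {
    RefreshRegistry.defaultRegistry()
        .register("blacklist", new BlacklistRefreshHandler());
  }
}
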
+ 241 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -54,10 +55,16 @@ import org.apache.hadoop.util.ToolRunner;
 import org.junit.Test;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryType.*;
+import static org.apache.hadoop.fs.permission.FsAction.*;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
 import static org.hamcrest.CoreMatchers.is;
 import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.*;
 
+import com.google.common.collect.Lists;
+
 /**
  * This class tests commands from DFSShell.
  */
@@ -1620,6 +1627,240 @@ public class TestDFSShell {
     int res = admin.run(new String[] {"-refreshNodes"});
     assertEquals("expected to fail -1", res , -1);
   }
+  
+  // Preserve Copy Option is -ptopxa (timestamps, ownership, permission, XATTR,
+  // ACLs)
+  @Test (timeout = 120000)
+  public void testCopyCommandsWithPreserveOption() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(true).build();
+    FsShell shell = null;
+    FileSystem fs = null;
+    final String testdir = "/tmp/TestDFSShell-testCopyCommandsWithPreserveOption-"
+        + counter.getAndIncrement();
+    final Path hdfsTestDir = new Path(testdir);
+    try {
+      fs = cluster.getFileSystem();
+      fs.mkdirs(hdfsTestDir);
+      Path src = new Path(hdfsTestDir, "srcfile");
+      fs.create(src).close();
+
+      fs.setAcl(src, Lists.newArrayList(
+          aclEntry(ACCESS, USER, ALL),
+          aclEntry(ACCESS, USER, "foo", ALL),
+          aclEntry(ACCESS, GROUP, READ_EXECUTE),
+          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+          aclEntry(ACCESS, OTHER, EXECUTE)));
+
+      FileStatus status = fs.getFileStatus(src);
+      final long mtime = status.getModificationTime();
+      final long atime = status.getAccessTime();
+      final String owner = status.getOwner();
+      final String group = status.getGroup();
+      final FsPermission perm = status.getPermission();
+      
+      fs.setXAttr(src, "user.a1", new byte[]{0x31, 0x32, 0x33});
+      fs.setXAttr(src, "trusted.a1", new byte[]{0x31, 0x31, 0x31});
+      
+      shell = new FsShell(conf);
+      
+      // -p
+      Path target1 = new Path(hdfsTestDir, "targetfile1");
+      String[] argv = new String[] { "-cp", "-p", src.toUri().toString(), 
+          target1.toUri().toString() };
+      int ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -p is not working", SUCCESS, ret);
+      FileStatus targetStatus = fs.getFileStatus(target1);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      FsPermission targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      Map<String, byte[]> xattrs = fs.getXAttrs(target1);
+      assertTrue(xattrs.isEmpty());
+      List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptop
+      Path target2 = new Path(hdfsTestDir, "targetfile2");
+      argv = new String[] { "-cp", "-ptop", src.toUri().toString(), 
+          target2.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptop is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target2);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target2);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(target2).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopx
+      Path target3 = new Path(hdfsTestDir, "targetfile3");
+      argv = new String[] { "-cp", "-ptopx", src.toUri().toString(), 
+          target3.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopx is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target3);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target3);
+      assertEquals(2, xattrs.size());
+      assertArrayEquals(new byte[]{0x31, 0x32, 0x33}, xattrs.get("user.a1"));
+      assertArrayEquals(new byte[]{0x31, 0x31, 0x31}, xattrs.get("trusted.a1"));
+      acls = fs.getAclStatus(target3).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopa
+      Path target4 = new Path(hdfsTestDir, "targetfile4");
+      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
+          target4.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target4);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target4);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(target4).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target4));
+
+      // -ptoa (verify -pa option will preserve permissions also)
+      Path target5 = new Path(hdfsTestDir, "targetfile5");
+      argv = new String[] { "-cp", "-ptoa", src.toUri().toString(),
+          target5.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptoa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target5);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      xattrs = fs.getXAttrs(target5);
+      assertTrue(xattrs.isEmpty());
+      acls = fs.getAclStatus(target5).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target5));
+    } finally {
+      if (null != shell) {
+        shell.close();
+      }
+
+      if (null != fs) {
+        fs.delete(hdfsTestDir, true);
+        fs.close();
+      }
+      cluster.shutdown();
+    }
+  }
+
+  // Verify cp -pa option will preserve both ACL and sticky bit.
+  @Test (timeout = 120000)
+  public void testCopyCommandsPreserveAclAndStickyBit() throws Exception {
+    Configuration conf = new Configuration();
+    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
+        .format(true).build();
+    FsShell shell = null;
+    FileSystem fs = null;
+    final String testdir =
+        "/tmp/TestDFSShell-testCopyCommandsPreserveAclAndStickyBit-"
+        + counter.getAndIncrement();
+    final Path hdfsTestDir = new Path(testdir);
+    try {
+      fs = cluster.getFileSystem();
+      fs.mkdirs(hdfsTestDir);
+      Path src = new Path(hdfsTestDir, "srcfile");
+      fs.create(src).close();
+
+      fs.setAcl(src, Lists.newArrayList(
+          aclEntry(ACCESS, USER, ALL),
+          aclEntry(ACCESS, USER, "foo", ALL),
+          aclEntry(ACCESS, GROUP, READ_EXECUTE),
+          aclEntry(ACCESS, GROUP, "bar", READ_EXECUTE),
+          aclEntry(ACCESS, OTHER, EXECUTE)));
+      // set sticky bit
+      fs.setPermission(src,
+          new FsPermission(ALL, READ_EXECUTE, EXECUTE, true));
+
+      FileStatus status = fs.getFileStatus(src);
+      final long mtime = status.getModificationTime();
+      final long atime = status.getAccessTime();
+      final String owner = status.getOwner();
+      final String group = status.getGroup();
+      final FsPermission perm = status.getPermission();
+
+      shell = new FsShell(conf);
+
+      // -p preserves sticky bit and doesn't preserve ACL
+      Path target1 = new Path(hdfsTestDir, "targetfile1");
+      String[] argv = new String[] { "-cp", "-p", src.toUri().toString(),
+          target1.toUri().toString() };
+      int ret = ToolRunner.run(shell, argv);
+      assertEquals("cp is not working", SUCCESS, ret);
+      FileStatus targetStatus = fs.getFileStatus(target1);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      FsPermission targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      List<AclEntry> acls = fs.getAclStatus(target1).getEntries();
+      assertTrue(acls.isEmpty());
+      assertFalse(targetPerm.getAclBit());
+
+      // -ptopa preserves both sticky bit and ACL
+      Path target2 = new Path(hdfsTestDir, "targetfile2");
+      argv = new String[] { "-cp", "-ptopa", src.toUri().toString(),
+          target2.toUri().toString() };
+      ret = ToolRunner.run(shell, argv);
+      assertEquals("cp -ptopa is not working", SUCCESS, ret);
+      targetStatus = fs.getFileStatus(target2);
+      assertEquals(mtime, targetStatus.getModificationTime());
+      assertEquals(atime, targetStatus.getAccessTime());
+      assertEquals(owner, targetStatus.getOwner());
+      assertEquals(group, targetStatus.getGroup());
+      targetPerm = targetStatus.getPermission();
+      assertTrue(perm.equals(targetPerm));
+      acls = fs.getAclStatus(target2).getEntries();
+      assertFalse(acls.isEmpty());
+      assertTrue(targetPerm.getAclBit());
+      assertEquals(fs.getAclStatus(src), fs.getAclStatus(target2));
+    } finally {
+      if (null != shell) {
+        shell.close();
+      }
+      if (null != fs) {
+        fs.delete(hdfsTestDir, true);
+        fs.close();
+      }
+      cluster.shutdown();
+    }
+  }
 
   // force Copy Option is -f
   @Test (timeout = 30000)

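For context on what the tests above exercise: driving FsShell's cp with preserve flags programmatically reduces to the pattern below, a minimal sketch assuming a configured fs.defaultFS; the paths are hypothetical.

    Configuration conf = new Configuration();
    FsShell shell = new FsShell(conf);
    try {
      // -p preserves timestamps, ownership and permission (incl. sticky bit);
      // appending 'a' (as in -ptopa) additionally preserves ACL entries.
      int rc = ToolRunner.run(shell, new String[] {
          "-cp", "-ptopa", "/src/file", "/dst/file" });  // hypothetical paths
      assert rc == 0;
    } finally {
      shell.close();
    }
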
+ 65 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java

@@ -25,13 +25,16 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.io.RandomAccessFile;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
 import java.util.Iterator;
+import java.util.List;
 import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -453,4 +456,66 @@ public class TestReplication {
     }
     fs.delete(fileName, true);
   }
+
+  /**
+   * Test that blocks get replicated when there are corrupted replicas but
+   * at least the minimum replication count of good replicas.
+   *
+   * Simulate corrupt replicas by creating dummy block and meta files, then
+   * restart the DNs so the corrupted blocks are detected as soon as possible.
+   */
+  @Test(timeout=30000)
+  public void testReplicationWhenBlockCorruption() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setLong(
+          DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      FileSystem fs = cluster.getFileSystem();
+      FSDataOutputStream create = fs.create(new Path("/test"));
+      fs.setReplication(new Path("/test"), (short) 1);
+      create.write(new byte[1024]);
+      create.close();
+
+      List<File> nonParticipatedNodeDirs = new ArrayList<File>();
+      File participatedNodeDirs = null;
+      for (int i = 0; i < cluster.getDataNodes().size(); i++) {
+        File storageDir = cluster.getInstanceStorageDir(i, 0);
+        String bpid = cluster.getNamesystem().getBlockPoolId();
+        File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
+        if (data_dir.listFiles().length == 0) {
+          nonParticipatedNodeDirs.add(data_dir);
+        } else {
+          participatedNodeDirs = data_dir;
+        }
+      }
+
+      String blockFile = null;
+      File[] listFiles = participatedNodeDirs.listFiles();
+      for (File file : listFiles) {
+        if (file.getName().startsWith("blk_")
+            && !file.getName().endsWith("meta")) {
+          blockFile = file.getName();
+          for (File file1 : nonParticipatedNodeDirs) {
+            file1.mkdirs();
+            new File(file1, blockFile).createNewFile();
+            new File(file1, blockFile + "_1000.meta").createNewFile();
+          }
+          break;
+        }
+      }
+
+      fs.setReplication(new Path("/test"), (short) 3);
+      cluster.restartDataNodes(); // restart DNs so they detect the dummy
+      // copied blocks
+      cluster.waitActive();
+      cluster.triggerBlockReports();
+      DFSTestUtil.waitReplication(fs, new Path("/test"), (short) 3);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 }

+ 6 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/BlockReportTestBase.java

@@ -410,6 +410,7 @@ public abstract class BlockReportTestBase {
    * The second datanode is started in the cluster.
   * As soon as the replication process is completed, the test finds a block from
   * the second DN and sets its GS to be less than the original one.
+   * This is markBlockAsCorrupt case 3, so we expect one pending deletion.
   * Block report is forced and the check for the # of corrupted blocks is performed.
   * Another block is chosen and its length is set to less than the original.
   * A check for another corrupted block is performed after yet another
@@ -436,20 +437,20 @@ public abstract class BlockReportTestBase {
     printStats();
 
     assertThat("Wrong number of corrupt blocks",
-               cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
+               cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
     assertThat("Wrong number of PendingDeletion blocks",
-               cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
+               cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
     assertThat("Wrong number of PendingReplication blocks",
                cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
 
-    reports = getBlockReports(dn, poolId, true, true);
+    reports = getBlockReports(dn, poolId, false, true);
     sendBlockReports(dnR, poolId, reports);
     printStats();
 
     assertThat("Wrong number of corrupt blocks",
-               cluster.getNamesystem().getCorruptReplicaBlocks(), is(2L));
+               cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
     assertThat("Wrong number of PendingDeletion blocks",
-               cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
+               cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
     assertThat("Wrong number of PendingReplication blocks",
                cluster.getNamesystem().getPendingReplicationBlocks(), is(0L));
 

+ 31 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBPOfferService.java

@@ -325,15 +325,14 @@ public class TestBPOfferService {
       }
     }).when(mockDn).initBlockPool(Mockito.any(BPOfferService.class));
     BPOfferService bpos = setupBPOSForNNs(mockDn, mockNN1, mockNN2);
+    List<BPServiceActor> actors = bpos.getBPServiceActors();
+    assertEquals(2, actors.size());
     bpos.start();
     try {
       waitForInitialization(bpos);
-      List<BPServiceActor> actors = bpos.getBPServiceActors();
-      // even if one of the actor initialization fails also other will be
-      // running until both failed.
-      assertEquals(2, actors.size());
-      BPServiceActor actor = actors.get(0);
-      waitForBlockReport(actor.getNameNodeProxy());
+      // even if one of the actor initializations fails, the other one will
+      // still finish the block report.
+      waitForBlockReport(mockNN1, mockNN2);
     } finally {
       bpos.stop();
     }
@@ -409,7 +408,32 @@ public class TestBPOfferService {
       }
     }, 500, 10000);
   }
-  
+
+  private void waitForBlockReport(
+      final DatanodeProtocolClientSideTranslatorPB mockNN1,
+      final DatanodeProtocolClientSideTranslatorPB mockNN2)
+          throws Exception {
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      @Override
+      public Boolean get() {
+        return get(mockNN1) || get(mockNN2);
+      }
+
+      private Boolean get(DatanodeProtocolClientSideTranslatorPB mockNN) {
+        try {
+          Mockito.verify(mockNN).blockReport(
+                  Mockito.<DatanodeRegistration>anyObject(),
+                  Mockito.eq(FAKE_BPID),
+                  Mockito.<StorageBlockReport[]>anyObject());
+          return true;
+        } catch (Throwable t) {
+          LOG.info("waiting on block report: " + t.getMessage());
+          return false;
+        }
+      }
+    }, 500, 10000);
+  }
+
   private ReceivedDeletedBlockInfo[] waitForBlockReceived(
       ExtendedBlock fakeBlock,
       DatanodeProtocolClientSideTranslatorPB mockNN) throws Exception {

+ 11 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java

@@ -1408,12 +1408,17 @@ public class TestCacheDirectives {
    */
   private void checkPendingCachedEmpty(MiniDFSCluster cluster)
       throws Exception {
-    final DatanodeManager datanodeManager =
-        cluster.getNamesystem().getBlockManager().getDatanodeManager();
-    for (DataNode dn : cluster.getDataNodes()) {
-      DatanodeDescriptor descriptor =
-          datanodeManager.getDatanode(dn.getDatanodeId());
-      Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+    cluster.getNamesystem().readLock();
+    try {
+      final DatanodeManager datanodeManager =
+          cluster.getNamesystem().getBlockManager().getDatanodeManager();
+      for (DataNode dn : cluster.getDataNodes()) {
+        DatanodeDescriptor descriptor =
+            datanodeManager.getDatanode(dn.getDatanodeId());
+        Assert.assertTrue(descriptor.getPendingCached().isEmpty());
+      }
+    } finally {
+      cluster.getNamesystem().readUnlock();
     }
   }
 

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java

@@ -62,8 +62,6 @@ public class TestCommitBlockSynchronization {
     doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
     doReturn("").when(namesystemSpy).closeFileCommitBlocks(
         any(INodeFile.class), any(BlockInfo.class));
-    doReturn("").when(namesystemSpy).persistBlocks(
-        any(INodeFile.class), anyBoolean());
     doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
 
     return namesystemSpy;

+ 149 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java

@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.FileNotFoundException;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.StorageType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
+import org.apache.hadoop.net.Node;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
+
+
+/**
+ * Test race between delete and other operations.  For now only addBlock()
+ * is tested since all the others hold the FSNamesystem lock for the
+ * whole duration.
+ */
+public class TestDeleteRace {
+  private static final Log LOG = LogFactory.getLog(TestDeleteRace.class);
+  private static final Configuration conf = new HdfsConfiguration();
+  private MiniDFSCluster cluster;
+
+  @Test  
+  public void testDeleteAddBlockRace() throws Exception {
+    testDeleteAddBlockRace(false);
+  }
+
+  @Test  
+  public void testDeleteAddBlockRaceWithSnapshot() throws Exception {
+    testDeleteAddBlockRace(true);
+  }
+
+  private void testDeleteAddBlockRace(boolean hasSnapshot) throws Exception {
+    try {
+      conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
+          SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      FileSystem fs = cluster.getFileSystem();
+      final String fileName = "/testDeleteAddBlockRace";
+      Path filePath = new Path(fileName);
+
+      FSDataOutputStream out = null;
+      out = fs.create(filePath);
+      if (hasSnapshot) {
+        SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs, new Path(
+            "/"), "s1");
+      }
+
+      Thread deleteThread = new DeleteThread(fs, filePath);
+      deleteThread.start();
+
+      try {
+        // write data and sync to make sure a block is allocated.
+        out.write(new byte[32], 0, 32);
+        out.hsync();
+        Assert.fail("Should have failed.");
+      } catch (FileNotFoundException e) {
+        GenericTestUtils.assertExceptionContains(filePath.getName(), e);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  private static class SlowBlockPlacementPolicy extends
+      BlockPlacementPolicyDefault {
+    @Override
+    public DatanodeStorageInfo[] chooseTarget(String srcPath,
+                                      int numOfReplicas,
+                                      Node writer,
+                                      List<DatanodeStorageInfo> chosenNodes,
+                                      boolean returnChosenNodes,
+                                      Set<Node> excludedNodes,
+                                      long blocksize,
+                                      StorageType storageType) {
+      DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
+          numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
+          blocksize, storageType);
+      try {
+        Thread.sleep(3000);
+      } catch (InterruptedException e) {}
+      return results;
+    }
+  }
+
+  private class DeleteThread extends Thread {
+    private FileSystem fs;
+    private Path path;
+
+    DeleteThread(FileSystem fs, Path path) {
+      this.fs = fs;
+      this.path = path;
+    }
+
+    @Override
+    public void run() {
+      try {
+        Thread.sleep(1000);
+        LOG.info("Deleting " + path);
+        final FSDirectory fsdir = cluster.getNamesystem().dir;
+        INode fileINode = fsdir.getINode4Write(path.toString());
+        INodeMap inodeMap = (INodeMap) Whitebox.getInternalState(fsdir,
+            "inodeMap");
+
+        fs.delete(path, false);
+        // after deletion, add the inode back to the inodeMap
+        inodeMap.put(fileINode);
+        LOG.info("Deleted " + path);
+      } catch (Exception e) {
+        LOG.info(e);
+      }
+    }
+  }
+}

+ 41 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java

@@ -22,22 +22,29 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.BufferedReader;
 import java.io.IOException;
 import java.io.StringReader;
+import java.util.EnumSet;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.XAttr;
+import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
+import com.google.common.collect.Lists;
+
 /**
  * Test {@link FSDirectory}, the in-memory namespace tree.
  */
@@ -70,6 +77,7 @@ public class TestFSDirectory {
   @Before
   public void setUp() throws Exception {
     conf = new Configuration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 2);
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(REPLICATION)
       .build();
@@ -171,4 +179,36 @@ public class TestFSDirectory {
     Assert.assertTrue(classname.startsWith(INodeFile.class.getSimpleName())
         || classname.startsWith(INodeDirectory.class.getSimpleName()));
   }
+  
+  @Test
+  public void testINodeXAttrsLimit() throws Exception {
+    List<XAttr> existingXAttrs = Lists.newArrayListWithCapacity(2);
+    XAttr xAttr1 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a1").setValue(new byte[]{0x31, 0x32, 0x33}).build();
+    XAttr xAttr2 = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.USER).
+        setName("a2").setValue(new byte[]{0x31, 0x31, 0x31}).build();
+    existingXAttrs.add(xAttr1);
+    existingXAttrs.add(xAttr2);
+    
+    // Adding a system namespace xattr isn't affected by the inode xattr limit.
+    XAttr newXAttr = (new XAttr.Builder()).setNameSpace(XAttr.NameSpace.SYSTEM).
+        setName("a3").setValue(new byte[]{0x33, 0x33, 0x33}).build();
+    List<XAttr> xAttrs = fsdir.setINodeXAttr(existingXAttrs, newXAttr, 
+        EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+    Assert.assertEquals(xAttrs.size(), 3);
+    
+    // Adding a trusted namespace xattr is affected by the inode xattr limit.
+    XAttr newXAttr1 = (new XAttr.Builder()).setNameSpace(
+        XAttr.NameSpace.TRUSTED).setName("a4").
+        setValue(new byte[]{0x34, 0x34, 0x34}).build();
+    try {
+      fsdir.setINodeXAttr(existingXAttrs, newXAttr1, 
+          EnumSet.of(XAttrSetFlag.CREATE, XAttrSetFlag.REPLACE));
+      Assert.fail("Setting a user-visible xattr on the inode should fail " +
+          "when reaching the limit.");
+    } catch (IOException e) {
+      GenericTestUtils.assertExceptionContains("Cannot add additional XAttr " +
+          "to inode, would exceed limit", e);
+    }
+  }
 }

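The limit exercised above is enforced inside FSDirectory; the user-facing counterpart is the FileSystem xattr API used elsewhere in this commit. A minimal sketch, with a hypothetical path and attribute name:

    Path p = new Path("/xattr-demo");                // hypothetical path
    fs.mkdirs(p);
    // user-namespace xattrs count against the per-inode limit tested above
    fs.setXAttr(p, "user.a1", new byte[]{0x31, 0x32, 0x33});
    Map<String, byte[]> xattrs = fs.getXAttrs(p);    // {"user.a1" -> 31 32 33}
    fs.removeXAttr(p, "user.a1");
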
+ 13 - 28
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java

@@ -22,6 +22,7 @@ import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 import static org.apache.hadoop.util.Time.now;
 import static org.junit.Assert.assertEquals;
 import static org.mockito.Matchers.anyObject;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -45,37 +46,20 @@ import org.junit.Test;
 
 public class TestFsLimits {
   static Configuration conf;
-  static INode[] inodes;
-  static FSDirectory fs;
+  static FSNamesystem fs;
   static boolean fsIsReady;
   
   static final PermissionStatus perms
     = new PermissionStatus("admin", "admin", FsPermission.getDefault());
 
-  static private FSImage getMockFSImage() {
-    FSEditLog editLog = mock(FSEditLog.class);
+  static private FSNamesystem getMockNamesystem() throws IOException {
     FSImage fsImage = mock(FSImage.class);
-    when(fsImage.getEditLog()).thenReturn(editLog);
-    return fsImage;
-  }
-
-  static private FSNamesystem getMockNamesystem() {
-    FSNamesystem fsn = mock(FSNamesystem.class);
-    when(
-        fsn.createFsOwnerPermissions((FsPermission)anyObject())
-    ).thenReturn(
-         new PermissionStatus("root", "wheel", FsPermission.getDefault())
-    );
+    FSEditLog editLog = mock(FSEditLog.class);
+    doReturn(editLog).when(fsImage).getEditLog();
+    FSNamesystem fsn = new FSNamesystem(conf, fsImage);
+    fsn.getFSDirectory().setReady(fsIsReady);
     return fsn;
   }
-  
-  private static class MockFSDirectory extends FSDirectory {
-    public MockFSDirectory() throws IOException {
-      super(getMockFSImage(), getMockNamesystem(), conf);
-      setReady(fsIsReady);
-      NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
-    }
-  }
 
   @Before
   public void setUp() throws IOException {
@@ -83,7 +67,7 @@ public class TestFsLimits {
     conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
              fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
                                 "namenode")).toString());
-
+    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     fs = null;
     fsIsReady = true;
   }
@@ -197,9 +181,10 @@ public class TestFsLimits {
     lazyInitFSDirectory();
     Class<?> generated = null;
     try {
-      fs.mkdirs(name, perms, false, now());
+      fs.mkdirs(name, perms, false);
     } catch (Throwable e) {
       generated = e.getClass();
+      e.printStackTrace();
     }
     assertEquals(expected, generated);
   }
@@ -209,7 +194,7 @@ public class TestFsLimits {
     lazyInitFSDirectory();
     Class<?> generated = null;
     try {
-      fs.renameTo(src, dst, false, new Rename[] { });
+      fs.renameTo(src, dst, new Rename[] { });
     } catch (Throwable e) {
       generated = e.getClass();
     }
@@ -222,7 +207,7 @@ public class TestFsLimits {
     lazyInitFSDirectory();
     Class<?> generated = null;
     try {
-      fs.renameTo(src, dst, false);
+      fs.renameTo(src, dst);
     } catch (Throwable e) {
       generated = e.getClass();
     }
@@ -232,7 +217,7 @@ public class TestFsLimits {
   private static void lazyInitFSDirectory() throws IOException {
     // have to create after the caller has had a chance to set conf values
     if (fs == null) {
-      fs = new MockFSDirectory();
+      fs = getMockNamesystem();
     }
   }
 }

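A side note on the stubbing style the rewrite above switches to: doReturn().when() configures a mock without invoking the real method, whereas when().thenReturn() evaluates its argument first, which can bite on spies and partially constructed objects. A minimal sketch:

    FSImage fsImage = mock(FSImage.class);
    FSEditLog editLog = mock(FSEditLog.class);
    // getEditLog() is never actually invoked while stubbing
    doReturn(editLog).when(fsImage).getEditLog();
    // equivalent for a plain mock, but would call the real method on a spy:
    // when(fsImage.getEditLog()).thenReturn(editLog);
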
+ 10 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java

@@ -138,6 +138,13 @@ public class TestOfflineImageViewer {
       hdfs.mkdirs(new Path("/snapshot/1"));
       hdfs.delete(snapshot, true);
 
+      // Set XAttrs so the fsimage contains XAttr ops
+      final Path xattr = new Path("/xattr");
+      hdfs.mkdirs(xattr);
+      hdfs.setXAttr(xattr, "user.a1", new byte[]{ 0x31, 0x32, 0x33 });
+      hdfs.setXAttr(xattr, "user.a2", new byte[]{ 0x37, 0x38, 0x39 });
+      writtenFiles.put(xattr.toString(), hdfs.getFileStatus(xattr));
+
       // Write results to the fsimage file
       hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
       hdfs.saveNamespace();
@@ -210,8 +217,8 @@ public class TestOfflineImageViewer {
     matcher = p.matcher(output.getBuffer());
     assertTrue(matcher.find() && matcher.groupCount() == 1);
     int totalDirs = Integer.parseInt(matcher.group(1));
-    // totalDirs includes root directory and empty directory
-    assertEquals(NUM_DIRS + 2, totalDirs);
+    // totalDirs includes root directory, empty directory, and xattr directory
+    assertEquals(NUM_DIRS + 3, totalDirs);
 
     FileStatus maxFile = Collections.max(writtenFiles.values(),
         new Comparator<FileStatus>() {
@@ -264,7 +271,7 @@ public class TestOfflineImageViewer {
 
       // verify the number of directories
       FileStatus[] statuses = webhdfs.listStatus(new Path("/"));
-      assertEquals(NUM_DIRS + 1, statuses.length); // contains empty directory
+      assertEquals(NUM_DIRS + 2, statuses.length); // contains empty and xattr directory
 
       // verify the number of files in the directory
       statuses = webhdfs.listStatus(new Path("/dir0"));

+ 47 - 34
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml

@@ -16118,8 +16118,7 @@
       </comparators>
     </test>
 
-    <!-- DFS tests
-        Must come before moveFromLocal tests until HDFS-6471 is fixed -->
+    <!-- DFS tests -->
     <test>
       <description>appendToFile</description>
       <test-commands>
@@ -16480,10 +16479,11 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: moving non existent file(absolute path)</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal /user/wrongdata file</command>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir</command>
+        <command>-fs NAMENODE -moveFromLocal /user/wrongdata /user/USERNAME/dir</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
       </cleanup-commands>
       <comparators>
         <comparator>
@@ -16496,12 +16496,11 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: moving non existent file(relative path)</description>
       <test-commands>
-        <command>-fs NAMENODE -mkdir -p dir</command> <!-- make sure user home dir exists -->
-        <command>-fs NAMENODE -touchz test</command>
-        <command>-fs NAMENODE -moveFromLocal wrongdata file</command>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir</command> <!-- make sure user home dir exists -->
+        <command>-fs NAMENODE -moveFromLocal wrongdata /user/USERNAME/dir</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
       </cleanup-commands>
       <comparators>
         <comparator>
@@ -16514,19 +16513,19 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: moving many files into an existing file</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes /data15bytes</command>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data30bytes /data30bytes</command>
-        <command>-fs NAMENODE -mkdir -p dir</command> <!-- make sure user home dir exists -->
-        <command>-fs NAMENODE -touchz file0</command>
-        <command>-fs NAMENODE -moveFromLocal /data15bytes /data30bytes file0</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir</command>
+        <command>-fs NAMENODE -touchz /user/USERNAME/dir/file0</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes /user/USERNAME/dir/file0</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `file0': Is not a directory</expected-output>
+          <expected-output>moveFromLocal: `/user/USERNAME/dir/file0': Is not a directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16534,17 +16533,17 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: moving many files into a non existent directory</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes /data15bytes</command>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data30bytes /data30bytes</command>
-        <command>-fs NAMENODE -moveFromLocal /data15bytes /data30bytes wrongdir</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes /user/USERNAME/dir/wrongdir</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r /user/USERNAME</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `wrongdir': No such file or directory</expected-output>
+          <expected-output>^moveFromLocal: `/user/USERNAME/dir/wrongdir': No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16552,7 +16551,8 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for hdfs:// path - moving non existent file</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal /user/wrongdata hdfs:///file</command>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir</command>
+        <command>-fs NAMENODE -moveFromLocal /user/wrongdata hdfs:///user/USERNAME/dir/file</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r hdfs:///*</command>
@@ -16568,8 +16568,11 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for hdfs:// path - moving many files into an existing file</description>
       <test-commands>
-        <command>-fs NAMENODE -touchz hdfs:///file0</command>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///file0</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -mkdir -p /user/USERNAME/dir</command>
+        <command>-fs NAMENODE -touchz hdfs:///user/USERNAME/dir/file</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes hdfs:///user/USERNAME/dir/file</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r hdfs:///*</command>
@@ -16577,7 +16580,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `hdfs:///file0': Is not a directory</expected-output>
+          <expected-output>moveFromLocal: `hdfs:///user/USERNAME/dir/file': Is not a directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16585,15 +16588,17 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for hdfs:// path - moving many files into a non existent directory</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes hdfs:///wrongdir</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes hdfs:///user/USERNAME/dir/wrongdir</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r hdfs:///*</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `hdfs:///wrongdir': No such file or directory</expected-output>
+          <expected-output>moveFromLocal: `hdfs:///user/USERNAME/dir/wrongdir': No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16601,7 +16606,8 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for Namenode's path - moving non existent file</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal /user/wrongdata NAMENODE/file</command>
+        <command>-fs NAMENODE -mkdir -p NAMENODE/user/USERNAME/dir</command>
+        <command>-fs NAMENODE -moveFromLocal /user/wrongdata NAMENODE/user/USERNAME/dir</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r NAMENODE/*</command>
@@ -16617,8 +16623,11 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for Namenode's path - moving many files into an existing file</description>
       <test-commands>
-        <command>-fs NAMENODE -touchz NAMENODE/file0</command>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/file0</command>
+        <command>-fs NAMENODE -mkdir -p NAMENODE/user/USERNAME/dir</command>
+        <command>-fs NAMENODE -touchz NAMENODE/user/USERNAME/dir/file0</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes NAMENODE/user/USERNAME/dir/file0</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r NAMENODE/*</command>
@@ -16626,7 +16635,7 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/file0': Is not a directory</expected-output>
+          <expected-output>moveFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/user/USERNAME/dir/file0': Is not a directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16634,15 +16643,19 @@
     <test> <!-- TESTED -->
       <description>moveFromLocal: Test for Namenode's path - moving many files into a non existent directory</description>
       <test-commands>
-        <command>-fs NAMENODE -moveFromLocal CLITEST_DATA/data15bytes CLITEST_DATA/data30bytes NAMENODE/wrongdir</command>
+        <command>-fs NAMENODE -mkdir -p NAMENODE/user/USERNAME</command>
+        <command>-fs NAMENODE -touchz NAMENODE/user/USERNAME/dir/file0</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data15bytes data15bytes</command>
+        <command>-fs NAMENODE -cp CLITEST_DATA/data30bytes data30bytes</command>
+        <command>-fs NAMENODE -moveFromLocal data15bytes data30bytes NAMENODE/user/USERNAME/wrongdir</command>
       </test-commands>
       <cleanup-commands>
-        <command>-fs NAMENODE -rm -r /user</command>
+        <command>-fs NAMENODE -rm -r NAMENODE/*</command>
       </cleanup-commands>
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^moveFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/wrongdir': No such file or directory</expected-output>
+          <expected-output>moveFromLocal: `hdfs://\w+[-.a-z0-9]*:[0-9]+/user/USERNAME/wrongdir': No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>

+ 12 - 4
hadoop-mapreduce-project/CHANGES.txt

@@ -77,6 +77,9 @@ Trunk (Unreleased)
     MAPREDUCE-5196. Add bookkeeping for managing checkpoints of task state.
     (Carlo Curino via cdouglas)
 
+    MAPREDUCE-5912. Task.calculateOutputSize does not handle Windows files after
+    MAPREDUCE-5196. (Remus Rusanu via cnauroth)
+
   BUG FIXES
 
     MAPREDUCE-5714. Removed forceful JVM exit in shutDownJob.  
@@ -142,9 +145,6 @@ Trunk (Unreleased)
     MAPREDUCE-5867. Fix NPE in KillAMPreemptionPolicy related to 
     ProportionalCapacityPreemptionPolicy (Sunil G via devaraj)
 
-    MAPREDUCE-5898. distcp to support preserving HDFS extended attributes(XAttrs)
-    (Yi Liu via umamahesh)
-
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -259,7 +259,15 @@ Release 2.5.0 - UNRELEASED
     MAPREDUCE-5777. Support utf-8 text with Byte Order Marker.
     (Zhihai Xu via kasha)
 
-Release 2.4.1 - UNRELEASED
+    MAPREDUCE-5898. distcp to support preserving HDFS extended attributes(XAttrs)
+    (Yi Liu via umamahesh)
+
+    MAPREDUCE-5920. Add Xattr option in DistCp docs. (Yi Liu via cnauroth)
+
+    MAPREDUCE-5924. Changed TaskAttemptImpl to ignore TA_COMMIT_PENDING event
+    at COMMIT_PENDING state. (Zhijie Shen via jianhe)
+
+Release 2.4.1 - 2014-06-23 
 
   INCOMPATIBLE CHANGES
 

+ 9 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java

@@ -335,6 +335,15 @@ public abstract class TaskAttemptImpl implements
      .addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
          TaskAttemptStateInternal.FAIL_CONTAINER_CLEANUP,
          TaskAttemptEventType.TA_TIMED_OUT, CLEANUP_CONTAINER_TRANSITION)
+     // The AM is likely to receive duplicate TA_COMMIT_PENDINGs, as the task
+     // attempt re-sends the commit message until it stops hitting IOExceptions
+     // and the message is delivered.
+     // Ignoring the duplicate commit messages is a short-term fix. In the long
+     // term, we need to use the retry cache to help this and other MR protocol
+     // APIs that can be considered @AtMostOnce.
+     .addTransition(TaskAttemptStateInternal.COMMIT_PENDING,
+         TaskAttemptStateInternal.COMMIT_PENDING,
+         TaskAttemptEventType.TA_COMMIT_PENDING)
 
      // Transitions from SUCCESS_CONTAINER_CLEANUP state
      // kill and cleanup the container

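The fix above is a self-transition: the duplicate event maps COMMIT_PENDING back onto itself instead of tripping an invalid-transition error. A generic illustration of the idea, deliberately not using Hadoop's StateMachineFactory API (all names hypothetical):

    enum State { RUNNING, COMMIT_PENDING, SUCCEEDED }
    enum Event { TA_COMMIT_PENDING, TA_DONE }

    static State step(State s, Event e) {
      switch (s) {
        case RUNNING:
          if (e == Event.TA_COMMIT_PENDING) return State.COMMIT_PENDING;
          break;
        case COMMIT_PENDING:
          // self-transition: a re-sent commit message is simply ignored
          if (e == Event.TA_COMMIT_PENDING) return State.COMMIT_PENDING;
          if (e == Event.TA_DONE) return State.SUCCEEDED;
          break;
        default:
          break;
      }
      throw new IllegalStateException(e + " in state " + s);
    }
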
+ 9 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java

@@ -112,6 +112,15 @@ public class TestMRApp {
     //wait for first attempt to commit pending
     app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);
 
+    //re-send the commit pending signal to the task
+    app.getContext().getEventHandler().handle(
+        new TaskAttemptEvent(
+            attempt.getID(),
+            TaskAttemptEventType.TA_COMMIT_PENDING));
+
+    //the task attempt should be still at COMMIT_PENDING
+    app.waitForState(attempt, TaskAttemptState.COMMIT_PENDING);
+
     //send the done signal to the task
     app.getContext().getEventHandler().handle(
         new TaskAttemptEvent(

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java

@@ -112,7 +112,7 @@ import org.apache.log4j.Level;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class JobConf extends Configuration {
-  
+
   private static final Log LOG = LogFactory.getLog(JobConf.class);
 
   static{
@@ -882,7 +882,7 @@ public class JobConf extends Configuration {
       JobContext.KEY_COMPARATOR, null, RawComparator.class);
     if (theClass != null)
       return ReflectionUtils.newInstance(theClass, this);
-    return WritableComparator.get(getMapOutputKeyClass().asSubclass(WritableComparable.class));
+    return WritableComparator.get(getMapOutputKeyClass().asSubclass(WritableComparable.class), this);
   }
 
   /**

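The recurring one-line change in this commit (here and in the join record readers below) threads the job Configuration into WritableComparator.get, so a registered comparator that implements Configurable receives the conf before it is used. A minimal sketch:

    Configuration conf = new Configuration();
    // the two-argument overload injects conf if the returned comparator
    // implements Configurable; the old one-argument form could not
    WritableComparator cmp = WritableComparator.get(Text.class, conf);
    int c = cmp.compare(new Text("a"), new Text("b"));  // negative: "a" < "b"
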
+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java

@@ -1120,8 +1120,8 @@ abstract public class Task implements Writable, Configurable {
     if (isMapTask() && conf.getNumReduceTasks() > 0) {
       try {
         Path mapOutput =  mapOutputFile.getOutputFile();
-        FileSystem fs = mapOutput.getFileSystem(conf);
-        return fs.getFileStatus(mapOutput).getLen();
+        FileSystem localFS = FileSystem.getLocal(conf);
+        return localFS.getFileStatus(mapOutput).getLen();
       } catch (IOException e) {
         LOG.warn ("Could not find output size " , e);
       }

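The change above pins the lookup to the local filesystem rather than deriving it from the path. Per MAPREDUCE-5912 in the CHANGES.txt hunk earlier, the motivation is Windows paths; a hedged sketch of the failure mode, with a hypothetical path:

    Configuration conf = new Configuration();
    // "C:" in a Windows-style path can be taken for a URI scheme, so
    // mapOutput.getFileSystem(conf) may resolve the wrong FileSystem
    Path mapOutput = new Path("C:/tmp/attempt_0/file.out");  // hypothetical
    // map outputs always live on local disk, so ask for the local FS directly
    long len = FileSystem.getLocal(conf).getFileStatus(mapOutput).getLen();
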
+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/CompositeRecordReader.java

@@ -131,7 +131,7 @@ public abstract class CompositeRecordReader<
   public void add(ComposableRecordReader<K,? extends V> rr) throws IOException {
     kids[rr.id()] = rr;
     if (null == q) {
-      cmp = WritableComparator.get(rr.createKey().getClass());
+      cmp = WritableComparator.get(rr.createKey().getClass(), conf);
       q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
           new Comparator<ComposableRecordReader<K,?>>() {
             public int compare(ComposableRecordReader<K,?> o1,

+ 21 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/join/WrappedRecordReader.java

@@ -22,6 +22,8 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.WritableComparator;
@@ -38,7 +40,7 @@ import org.apache.hadoop.mapred.RecordReader;
 @InterfaceStability.Stable
 public class WrappedRecordReader<K extends WritableComparable,
                           U extends Writable>
-    implements ComposableRecordReader<K,U> {
+    implements ComposableRecordReader<K,U>, Configurable {
 
   private boolean empty = false;
   private RecordReader<K,U> rr;
@@ -47,6 +49,7 @@ public class WrappedRecordReader<K extends WritableComparable,
   private K khead; // key at the top of this RR
   private U vhead; // value assoc with khead
   private WritableComparator cmp;
+  private Configuration conf;
 
   private ResetableIterator<U> vjoin;
 
@@ -55,13 +58,20 @@ public class WrappedRecordReader<K extends WritableComparable,
    */
   WrappedRecordReader(int id, RecordReader<K,U> rr,
       Class<? extends WritableComparator> cmpcl) throws IOException {
+    this(id, rr, cmpcl, null);
+  }
+
+  WrappedRecordReader(int id, RecordReader<K,U> rr,
+                      Class<? extends WritableComparator> cmpcl,
+                      Configuration conf) throws IOException {
     this.id = id;
     this.rr = rr;
+    this.conf = (conf == null) ? new Configuration() : conf;
     khead = rr.createKey();
     vhead = rr.createValue();
     try {
       cmp = (null == cmpcl)
-        ? WritableComparator.get(khead.getClass())
+        ? WritableComparator.get(khead.getClass(), this.conf)
         : cmpcl.newInstance();
     } catch (InstantiationException e) {
       throw (IOException)new IOException().initCause(e);
@@ -207,4 +217,13 @@ public class WrappedRecordReader<K extends WritableComparable,
     return 42;
   }
 
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
 }

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/CompositeRecordReader.java

@@ -109,7 +109,7 @@ public abstract class CompositeRecordReader<
         }
         // create priority queue
         if (null == q) {
-          cmp = WritableComparator.get(keyclass);
+          cmp = WritableComparator.get(keyclass, conf);
           q = new PriorityQueue<ComposableRecordReader<K,?>>(3,
                 new Comparator<ComposableRecordReader<K,?>>() {
                   public int compare(ComposableRecordReader<K,?> o1,

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/join/WrappedRecordReader.java

@@ -92,7 +92,7 @@ public class WrappedRecordReader<K extends WritableComparable<?>,
       keyclass = key.getClass().asSubclass(WritableComparable.class);
       valueclass = value.getClass();
       if (cmp == null) {
-        cmp = WritableComparator.get(keyclass);
+        cmp = WritableComparator.get(keyclass, conf);
       }
     }
   }

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistCp.md.vm

@@ -196,7 +196,7 @@ Command Line Options
 
 Flag              | Description                          | Notes
 ----------------- | ------------------------------------ | --------
-`-p[rbugpca]` | Preserve r: replication number b: block size u: user g: group p: permission c: checksum-type a: ACL | Modification times are not preserved. Also, when `-update` is specified, status updates will **not** be synchronized unless the file sizes also differ (i.e. unless the file is re-created). If -pa is specified, DistCp preserves the permissions also because ACLs are a super-set of permissions.
+`-p[rbugpcax]` | Preserve r: replication number b: block size u: user g: group p: permission c: checksum-type a: ACL x: XAttr | Modification times are not preserved. Also, when `-update` is specified, status updates will **not** be synchronized unless the file sizes also differ (i.e. unless the file is re-created). If -pa is specified, DistCp preserves the permissions also because ACLs are a super-set of permissions.
 `-i` | Ignore failures | As explained in the Appendix, this option will keep more accurate statistics about the copy than the default case. It also preserves logs from failed copies, which can be valuable for debugging. Finally, a failing map will not cause the job to fail before all splits are attempted.
 `-log <logdir>` | Write logs to \<logdir\> | DistCp keeps logs of each file it attempts to copy as map output. If a map fails, the log output will not be retained if it is re-executed.
 `-m <num_maps>` | Maximum number of simultaneous copies | Specify the number of maps to copy data. Note that more maps may not necessarily improve throughput.

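To illustrate the new x flag documented above, a programmatic DistCp run that preserves ACLs and XAttrs might look like the following sketch; the cluster URIs are hypothetical:

    Configuration conf = new Configuration();
    int rc = ToolRunner.run(conf, new DistCp(conf, null), new String[] {
        "-pugpax",                   // user, group, permission, ACLs, XAttrs
        "hdfs://nn1:8020/src",       // hypothetical source
        "hdfs://nn2:8020/dst" });    // hypothetical destination
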
+ 0 - 2
hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java

@@ -1,6 +1,4 @@
 /*
- * Copyright 2012 The Apache Software Foundation.
- *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at

+ 0 - 2
hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java

@@ -1,6 +1,4 @@
 /*
- * Copyright 2012 The Apache Software Foundation.
- *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at

+ 0 - 2
hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java

@@ -1,6 +1,4 @@
 /*
- * Copyright 2012 The Apache Software Foundation.
- *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at

+ 0 - 2
hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java

@@ -1,6 +1,4 @@
 /*
- * Copyright 2012 The Apache Software Foundation.
- *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at

部分文件因为文件数量过多而无法显示