Merge r1555021 through r1565516 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1565519 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 11 years ago
commit ccf08d9dc8
100 changed files with 4483 additions and 384 deletions
  1. 35 14
      dev-support/test-patch.sh
  2. 16 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  3. 6 5
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
  4. 19 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
  5. 8 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  6. 11 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java
  7. 20 9
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
  8. 2 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java
  9. 16 1
      hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
  10. 2 1
      hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm
  11. 7 7
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java
  12. 210 18
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java
  13. 30 22
      hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java
  14. 26 15
      hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java
  15. 24 8
      hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java
  16. 33 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  17. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java
  18. 6 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  19. 6 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  20. 8 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
  21. 80 18
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  22. 11 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java
  23. 18 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/PeerCache.java
  24. 11 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
  25. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  26. 2 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
  27. 6 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  28. 4 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
  29. 91 53
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  30. 169 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
  31. 14 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  32. 32 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  33. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java
  34. 4 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
  35. 28 1
      hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm
  36. 14 7
      hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml
  37. 85 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
  38. 86 42
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
  39. 54 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java
  40. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java
  41. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
  42. 4 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java
  43. 105 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java
  44. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
  45. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java
  46. BIN
      hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-2-reserved.tgz
  47. 9 0
      hadoop-mapreduce-project/CHANGES.txt
  48. 3 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
  49. 8 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
  50. 2 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
  51. 9 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml
  52. 7 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
  53. 55 0
      hadoop-yarn-project/CHANGES.txt
  54. 1 1
      hadoop-yarn-project/hadoop-yarn/bin/yarn
  55. 1 1
      hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd
  56. 38 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ApplicationsRequestScope.java
  57. 116 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java
  58. 14 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
  59. 25 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
  60. 109 22
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSEntity.java
  61. 39 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSEvent.java
  62. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSEvents.java
  63. 163 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSPutErrors.java
  64. 64 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/ConfigurationProvider.java
  65. 57 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/ConfigurationProviderFactory.java
  66. 43 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java
  67. 39 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
  68. 2 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
  69. 8 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto
  70. 8 7
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
  71. 5 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/ContainerLaunchFailAppMaster.java
  72. 7 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSFailedAppMaster.java
  73. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java
  74. 4 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
  75. 72 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java
  76. 48 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/LocalConfigurationProvider.java
  77. 61 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java
  78. 31 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
  79. 59 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java
  80. 14 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java
  81. 58 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnJacksonJaxbJsonProvider.java
  82. 28 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
  83. 38 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/apptimeline/TestApplicationTimelineRecords.java
  84. 17 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java
  85. 125 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineReader.java
  86. 29 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineStore.java
  87. 43 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineWriter.java
  88. 100 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/EntityId.java
  89. 288 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/MemoryApplicationTimelineStore.java
  90. 59 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/NameValuePair.java
  91. 20 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/package-info.java
  92. 9 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java
  93. 297 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ATSWebServices.java
  94. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java
  95. 532 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineStoreTestUtils.java
  96. 73 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestMemoryApplicationTimelineStore.java
  97. 212 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestATSWebServices.java
  98. 3 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
  99. 86 35
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java
  100. 24 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java

+ 35 - 14
dev-support/test-patch.sh

@@ -300,6 +300,17 @@ prebuildWithoutPatch () {
     {color:red}-1 patch{color}.  Trunk compilation may be broken."
     return 1
   fi
+
+  echo "$MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavadocWarnings.txt 2>&1"
+  $MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavadocWarnings.txt 2>&1
+  if [[ $? != 0 ]] ; then
+    echo "Trunk javadoc compilation is broken?"
+    JIRA_COMMENT="$JIRA_COMMENT
+
+    {color:red}-1 patch{color}.  Trunk compilation may be broken."
+    return 1
+  fi
+
   return 0
 }
 
@@ -401,6 +412,11 @@ applyPatch () {
 }
 
 ###############################################################################
+calculateJavadocWarnings() {
+    WARNING_FILE="$1"
+    RET=$(egrep "^[0-9]+ warnings$" "$WARNING_FILE" | awk '{sum+=$1} END {print sum}')
+}
+
 ### Check there are no javadoc warnings
 checkJavadocWarnings () {
   echo ""
@@ -420,24 +436,29 @@ checkJavadocWarnings () {
     (cd hadoop-common-project/hadoop-annotations; $MVN install > /dev/null 2>&1)
   fi
   $MVN clean test javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
-  javadocWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavadocWarnings.txt | $AWK '/Javadoc Warnings/,EOF' | $GREP warning | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
-  echo ""
-  echo ""
-  echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build."
-
-  #There are 14 warnings that are caused by things that are caused by using sun internal APIs.
-  #There are 2 warnings that are caused by the Apache DS Dn class used in MiniKdc.
-  OK_JAVADOC_WARNINGS=16;
-  ### if current warnings greater than OK_JAVADOC_WARNINGS
-  if [[ $javadocWarnings -ne $OK_JAVADOC_WARNINGS ]] ; then
-    JIRA_COMMENT="$JIRA_COMMENT
+  calculateJavadocWarnings "$PATCH_DIR/trunkJavadocWarnings.txt"
+  numTrunkJavadocWarnings=$RET
+  calculateJavadocWarnings "$PATCH_DIR/patchJavadocWarnings.txt"
+  numPatchJavadocWarnings=$RET
+  grep -i warning "$PATCH_DIR/trunkJavadocWarnings.txt" > "$PATCH_DIR/trunkJavadocWarningsFiltered.txt"
+  grep -i warning "$PATCH_DIR/patchJavadocWarnings.txt" > "$PATCH_DIR/patchJavadocWarningsFiltered.txt"
+  diff -u "$PATCH_DIR/trunkJavadocWarningsFiltered.txt" \
+          "$PATCH_DIR/patchJavadocWarningsFiltered.txt" > \
+          "$PATCH_DIR/diffJavadocWarnings.txt"
+  rm -f "$PATCH_DIR/trunkJavadocWarningsFiltered.txt" "$PATCH_DIR/patchJavadocWarningsFiltered.txt"
+  echo "There appear to be $numTrunkJavadocWarnings javadoc warnings before the patch and $numPatchJavadocWarnings javadoc warnings after applying the patch."
+  if [[ $numTrunkJavadocWarnings != "" && $numPatchJavadocWarnings != "" ]] ; then
+    if [[ $numPatchJavadocWarnings -gt $numTrunkJavadocWarnings ]] ; then
+      JIRA_COMMENT="$JIRA_COMMENT
 
-    {color:red}-1 javadoc{color}.  The javadoc tool appears to have generated `expr $(($javadocWarnings-$OK_JAVADOC_WARNINGS))` warning messages."
-    return 1
+    {color:red}-1 javadoc{color}.  The javadoc tool appears to have generated `expr $(($numPatchJavadocWarnings-$numTrunkJavadocWarnings))` warning messages.
+        See $BUILD_URL/artifact/trunk/patchprocess/diffJavadocWarnings.txt for details."
+        return 1
+    fi
   fi
   JIRA_COMMENT="$JIRA_COMMENT
 
-    {color:green}+1 javadoc{color}.  The javadoc tool did not generate any warning messages."
+    {color:green}+1 javadoc{color}.  There were no new javadoc warning messages."
   return 0
 }
 

+ 16 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -113,6 +113,11 @@ Trunk (Unreleased)
 
     HADOOP-10177. Create CLI tools for managing keys. (Larry McCay via omalley)
 
+    HADOOP-10244. TestKeyShell improperly tests the results of delete (Larry
+    McCay via omalley)
+
+    HADOOP-10325. Improve jenkins javadoc warnings from test-patch.sh (cmccabe)
+
   BUG FIXES
 
     HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -313,6 +318,15 @@ Release 2.4.0 - UNRELEASED
 
     HADOOP-10320. Javadoc in InterfaceStability.java lacks final </ul>.
     (René Nyffenegger via cnauroth)
+    
+    HADOOP-10085. CompositeService should allow adding services while being 
+    inited. (Steve Loughran via kasha)
+
+    HADOOP-10327. Trunk windows build broken after HDFS-5746.
+    (Vinay via cnauroth)
+
+    HADOOP-10330. TestFrameDecoder fails if it cannot bind port 12345.
+    (Arpit Agarwal)
 
 Release 2.3.0 - UNRELEASED
 
@@ -685,6 +699,8 @@ Release 2.3.0 - UNRELEASED
 
     HADOOP-10311. Cleanup vendor names from the code base. (tucu)
 
+    HADOOP-10273. Fix 'mvn site'. (Arpit Agarwal)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

+ 6 - 5
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java

@@ -34,13 +34,14 @@ public class HttpConfig {
     HTTPS_ONLY,
     HTTP_AND_HTTPS;
 
+    private static final Policy[] VALUES = values();
     public static Policy fromString(String value) {
-      if (HTTPS_ONLY.name().equalsIgnoreCase(value)) {
-        return HTTPS_ONLY;
-      } else if (HTTP_AND_HTTPS.name().equalsIgnoreCase(value)) {
-        return HTTP_AND_HTTPS;
+      for (Policy p : VALUES) {
+        if (p.name().equalsIgnoreCase(value)) {
+          return p;
+        }
       }
-      return HTTP_ONLY;
+      return null;
     }
 
     public boolean isHttpEnabled() {

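The hunk above replaces the hard-coded if/else chain in Policy.fromString() with a loop over a cached values() array and changes the fallback from HTTP_ONLY to null, so callers can now distinguish an unrecognized string from a real policy. A minimal, self-contained sketch of the same pattern; the enum below is a stand-in for illustration, not the actual org.apache.hadoop.http.HttpConfig class:

// Stand-in sketch of the Policy.fromString() pattern from the hunk above.
public class PolicyLookupSketch {
  enum Policy {
    HTTP_ONLY, HTTPS_ONLY, HTTP_AND_HTTPS;

    // Cache values() once; each call to values() would otherwise clone the array.
    private static final Policy[] VALUES = values();

    static Policy fromString(String value) {
      for (Policy p : VALUES) {
        if (p.name().equalsIgnoreCase(value)) {
          return p;
        }
      }
      return null; // unrecognized input is now visible to the caller
    }
  }

  public static void main(String[] args) {
    System.out.println(Policy.fromString("https_only"));   // HTTPS_ONLY
    System.out.println(Policy.fromString("bogus-policy")); // null, no silent HTTP_ONLY fallback
  }
}
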
+ 19 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -151,6 +151,13 @@ public class RetryPolicies {
         delayMillis, maxDelayBase);
   }
   
+  public static final RetryPolicy failoverOnNetworkException(
+      RetryPolicy fallbackPolicy, int maxFailovers, int maxRetries,
+      long delayMillis, long maxDelayBase) {
+    return new FailoverOnNetworkExceptionRetry(fallbackPolicy, maxFailovers,
+        maxRetries, delayMillis, maxDelayBase);
+  }
+  
   static class TryOnceThenFail implements RetryPolicy {
     @Override
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
@@ -516,18 +523,25 @@ public class RetryPolicies {
     
     private RetryPolicy fallbackPolicy;
     private int maxFailovers;
+    private int maxRetries;
     private long delayMillis;
     private long maxDelayBase;
     
     public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
         int maxFailovers) {
-      this(fallbackPolicy, maxFailovers, 0, 0);
+      this(fallbackPolicy, maxFailovers, 0, 0, 0);
     }
     
     public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
         int maxFailovers, long delayMillis, long maxDelayBase) {
+      this(fallbackPolicy, maxFailovers, 0, delayMillis, maxDelayBase);
+    }
+    
+    public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
+        int maxFailovers, int maxRetries, long delayMillis, long maxDelayBase) {
       this.fallbackPolicy = fallbackPolicy;
       this.maxFailovers = maxFailovers;
+      this.maxRetries = maxRetries;
       this.delayMillis = delayMillis;
       this.maxDelayBase = maxDelayBase;
     }
@@ -549,6 +563,10 @@ public class RetryPolicies {
             "failovers (" + failovers + ") exceeded maximum allowed ("
             + maxFailovers + ")");
       }
+      if (retries - failovers > maxRetries) {
+        return new RetryAction(RetryAction.RetryDecision.FAIL, 0, "retries ("
+            + retries + ") exceeded maximum allowed (" + maxRetries + ")");
+      }
       
       if (e instanceof ConnectException ||
           e instanceof NoRouteToHostException ||

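The new maxRetries field caps plain (non-failover) retries: the hunk fails the call once retries - failovers exceeds maxRetries, independently of the existing failover cap. A small self-contained sketch of that accounting rule; the names below are illustrative and not the Hadoop RetryPolicy API:

// Illustrative sketch of the retry/failover accounting added in the hunk above.
public class RetryBudgetSketch {
  enum Decision { RETRY, FAIL }

  static Decision decide(int retries, int failovers,
                         int maxRetries, int maxFailovers) {
    if (failovers >= maxFailovers) {
      return Decision.FAIL;                 // failovers exceeded maximum allowed
    }
    if (retries - failovers > maxRetries) {
      return Decision.FAIL;                 // plain retries exceeded maximum allowed
    }
    return Decision.RETRY;
  }

  public static void main(String[] args) {
    // With maxRetries = 2 and maxFailovers = 15, the third non-failover retry fails.
    System.out.println(decide(1, 0, 2, 15)); // RETRY
    System.out.println(decide(3, 0, 2, 15)); // FAIL
    System.out.println(decide(3, 2, 2, 15)); // RETRY (two of the attempts were failovers)
  }
}
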
+ 8 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -450,6 +450,14 @@ public abstract class Server {
     serviceAuthorizationManager.refresh(conf, provider);
   }
 
+  /**
+   * Refresh the service authorization ACL for the service handled by this server
+   * using the specified Configuration.
+   */
+  public void refreshServiceAclWithConfigration(Configuration conf,
+      PolicyProvider provider) {
+    serviceAuthorizationManager.refreshWithConfiguration(conf, provider);
+  }
   /**
    * Returns a handle to the serviceAuthorizationManager (required in tests)
    * @return instance of ServiceAuthorizationManager for this server

+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java

@@ -30,6 +30,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 @InterfaceAudience.Private
 public class ProxyUsers {
 
@@ -177,4 +179,13 @@ public class ProxyUsers {
       (list.contains("*"));
   }
 
+  @VisibleForTesting
+  public static Map<String, Collection<String>> getProxyGroups() {
+    return proxyGroups;
+  }
+
+  @VisibleForTesting
+  public static Map<String, Collection<String>> getProxyHosts() {
+    return proxyHosts;
+  }
 }

+ 20 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java

@@ -33,6 +33,8 @@ import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * An authorization manager which handles service-level authorization
  * for incoming service requests.
@@ -120,19 +122,23 @@ public class ServiceAuthorizationManager {
     // Make a copy of the original config, and load the policy file
     Configuration policyConf = new Configuration(conf);
     policyConf.addResource(policyFile);
-    
+    refreshWithConfiguration(policyConf, provider);
+  }
+
+  public synchronized void refreshWithConfiguration(Configuration conf,
+      PolicyProvider provider) {
     final Map<Class<?>, AccessControlList> newAcls =
-      new IdentityHashMap<Class<?>, AccessControlList>();
+        new IdentityHashMap<Class<?>, AccessControlList>();
 
     // Parse the config file
     Service[] services = provider.getServices();
     if (services != null) {
       for (Service service : services) {
-        AccessControlList acl = 
-          new AccessControlList(
-              policyConf.get(service.getServiceKey(), 
-                             AccessControlList.WILDCARD_ACL_VALUE)
-              );
+        AccessControlList acl =
+            new AccessControlList(
+                conf.get(service.getServiceKey(),
+                    AccessControlList.WILDCARD_ACL_VALUE)
+            );
         newAcls.put(service.getProtocol(), acl);
       }
     }
@@ -141,8 +147,13 @@ public class ServiceAuthorizationManager {
     protocolToAcl = newAcls;
   }
 
-  // Package-protected for use in tests.
-  Set<Class<?>> getProtocolsWithAcls() {
+  @VisibleForTesting
+  public Set<Class<?>> getProtocolsWithAcls() {
     return protocolToAcl.keySet();
   }
+
+  @VisibleForTesting
+  public AccessControlList getProtocolsAcls(Class<?> className) {
+    return protocolToAcl.get(className);
+  }
 }

+ 2 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java

@@ -19,7 +19,6 @@
 package org.apache.hadoop.service;
 
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -54,13 +53,13 @@ public class CompositeService extends AbstractService {
   }
 
   /**
-   * Get an unmodifiable list of services
+   * Get a cloned list of services
    * @return a list of child services at the time of invocation -
    * added services will not be picked up.
    */
   public List<Service> getServices() {
     synchronized (serviceList) {
-      return Collections.unmodifiableList(serviceList);
+      return new ArrayList<Service>(serviceList);
     }
   }
 

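Returning a copy instead of an unmodifiable view matters for the "add services while being inited" scenario (HADOOP-10085 in the CHANGES.txt hunk above, exercised by the new TestCompositeService cases below): an unmodifiable list is only a view of the backing list, so adding a child while the parent iterates over getServices() would throw ConcurrentModificationException, whereas a snapshot copy is unaffected. A minimal JDK-only sketch of the difference, using strings in place of Service objects:

import java.util.ArrayList;
import java.util.Collections;
import java.util.ConcurrentModificationException;
import java.util.List;

// Sketch: why getServices() now returns a copy rather than an unmodifiable view.
public class SnapshotVsViewSketch {
  public static void main(String[] args) {
    List<String> children = new ArrayList<>(List.of("service-0", "service-1"));

    // A defensive copy is a snapshot: mutating the source while iterating is safe.
    List<String> snapshot = new ArrayList<>(children);
    for (String s : snapshot) {
      children.add(s + "-sibling");   // fine, we are iterating the copy
    }
    System.out.println("after snapshot iteration: " + children);

    // An unmodifiable wrapper is only a view over the same backing list.
    List<String> view = Collections.unmodifiableList(children);
    try {
      for (String s : view) {
        children.add(s + "-sibling"); // structural change under the view's iterator
      }
    } catch (ConcurrentModificationException expected) {
      System.out.println("iterating the view while adding children fails: " + expected);
    }
  }
}
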
+ 16 - 1
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c

@@ -671,6 +671,7 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mmap(
   JNIEnv *env, jclass clazz, jobject jfd, jint jprot,
   jboolean jshared, jlong length)
 {
+#ifdef UNIX
   void *addr = 0;
   int prot, flags, fd;
   
@@ -684,18 +685,33 @@ Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_mmap(
     throw_ioe(env, errno);
   }
   return (jlong)(intptr_t)addr;
+#endif  //   UNIX
+
+#ifdef WINDOWS
+  THROW(env, "java/io/IOException",
+    "The function POSIX.mmap() is not supported on Windows");
+  return NULL;
+#endif
 }
 
 JNIEXPORT void JNICALL 
 Java_org_apache_hadoop_io_nativeio_NativeIO_00024POSIX_munmap(
   JNIEnv *env, jclass clazz, jlong jaddr, jlong length)
 {
+#ifdef UNIX
   void *addr;
 
   addr = (void*)(intptr_t)jaddr;
   if (munmap(addr, length) < 0) {
     throw_ioe(env, errno);
   }
+#endif  //   UNIX
+
+#ifdef WINDOWS
+  THROW(env, "java/io/IOException",
+    "The function POSIX.munmap() is not supported on Windows");
+  return NULL;
+#endif
 }
 
 
@@ -1050,4 +1066,3 @@ JNIEnv *env, jclass clazz)
 /**
  * vim: sw=2: ts=2: et:
  */
-

+ 2 - 1
hadoop-common-project/hadoop-common/src/site/apt/SecureMode.apt.vm

@@ -352,7 +352,8 @@ Configuration for <<<conf/core-site.xml>>>
 | | | This value is deprecated. Use dfs.http.policy |
 *-------------------------+-------------------------+------------------------+
 | <<<dfs.http.policy>>> | <HTTP_ONLY> or <HTTPS_ONLY> or <HTTP_AND_HTTPS> | |
-| | | HTTPS_ONLY turns off http access |
+| | | HTTPS_ONLY turns off http access. This option takes precedence over |
+| | | the deprecated configuration dfs.https.enable and hadoop.ssl.enabled. |
 *-------------------------+-------------------------+------------------------+
 | <<<dfs.namenode.https-address>>> | <nn_host_fqdn:50470> | |
 *-------------------------+-------------------------+------------------------+

+ 7 - 7
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyShell.java

@@ -41,7 +41,7 @@ public class TestKeyShell {
   
   @Test
   public void testKeySuccessfulKeyLifecycle() throws Exception {
-    outContent.flush();
+    outContent.reset();
     String[] args1 = {"create", "key1", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     int rc = 0;
@@ -52,14 +52,14 @@ public class TestKeyShell {
     assertTrue(outContent.toString().contains("key1 has been successfully " +
     		"created."));
 
-    outContent.flush();
+    outContent.reset();
     String[] args2 = {"list", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     rc = ks.run(args2);
     assertEquals(0, rc);
     assertTrue(outContent.toString().contains("key1"));
 
-    outContent.flush();
+    outContent.reset();
     String[] args3 = {"roll", "key1", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     rc = ks.run(args3);
@@ -67,7 +67,7 @@ public class TestKeyShell {
     assertTrue(outContent.toString().contains("key1 has been successfully " +
     		"rolled."));
 
-    outContent.flush();
+    outContent.reset();
     String[] args4 = {"delete", "key1", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     rc = ks.run(args4);
@@ -75,12 +75,12 @@ public class TestKeyShell {
     assertTrue(outContent.toString().contains("key1 has been successfully " +
     		"deleted."));
 
-    outContent.flush();
+    outContent.reset();
     String[] args5 = {"list", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     rc = ks.run(args5);
     assertEquals(0, rc);
-    assertTrue(outContent.toString().contains("key1"));
+    assertFalse(outContent.toString(), outContent.toString().contains("key1"));
   }
   
   @Test
@@ -165,7 +165,7 @@ public class TestKeyShell {
     assertTrue(outContent.toString().contains("key1 has been successfully " +
     		"created."));
 
-    outContent.flush();
+    outContent.reset();
     String[] args2 = {"delete", "key1", "--provider", 
         "jceks://file" + tmpDir + "/keystore.jceks"};
     rc = ks.run(args2);

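The test changes swap outContent.flush() for outContent.reset(): on a ByteArrayOutputStream, flush() is a no-op inherited from OutputStream, so the capture buffer kept accumulating output from earlier commands and the post-delete "list" assertion was matching stale text (the HADOOP-10244 issue listed in CHANGES.txt above). reset() actually discards the captured bytes, which is why the final check can now be an assertFalse. A tiny demonstration, independent of the KeyShell class:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

// Demonstrates why the test switched from flush() to reset() on the capture buffer.
public class FlushVsResetSketch {
  public static void main(String[] args) {
    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(captured, true);

    out.println("key1 has been successfully created.");
    captured.flush();                       // no-op for ByteArrayOutputStream
    out.println("listing keys...");
    System.out.println(captured.toString().contains("key1")); // true: old output still there

    captured.reset();                       // actually clears the captured bytes
    out.println("listing keys...");
    System.out.println(captured.toString().contains("key1")); // false: buffer really restarted
  }
}
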
+ 210 - 18
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestCompositeService.java → hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/service/TestCompositeService.java

@@ -16,26 +16,20 @@
  * limitations under the License.
  */
 
-package org.apache.hadoop.yarn.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
+package org.apache.hadoop.service;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.service.AbstractService;
-import org.apache.hadoop.service.BreakableService;
-import org.apache.hadoop.service.CompositeService;
-import org.apache.hadoop.service.Service;
-import org.apache.hadoop.service.ServiceStateException;
 import org.apache.hadoop.service.Service.STATE;
-import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.junit.Before;
 import org.junit.Test;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
 public class TestCompositeService {
 
   private static final int NUM_OF_SERVICES = 5;
@@ -156,7 +150,7 @@ public class TestCompositeService {
     try {
       serviceManager.start();
       fail("Exception should have been thrown due to startup failure of last service");
-    } catch (YarnRuntimeException e) {
+    } catch (ServiceTestRuntimeException e) {
       for (int i = 0; i < NUM_OF_SERVICES - 1; i++) {
         if (i >= FAILED_SERVICE_SEQ_NUMBER && STOP_ONLY_STARTED_SERVICES) {
           // Failed service state should be INITED
@@ -197,7 +191,7 @@ public class TestCompositeService {
     // Stop the composite service
     try {
       serviceManager.stop();
-    } catch (YarnRuntimeException e) {
+    } catch (ServiceTestRuntimeException e) {
     }
     assertInState(STATE.STOPPED, services);
   }
@@ -335,7 +329,41 @@ public class TestCompositeService {
 
     testService.init(new Configuration());
     assertEquals("Incorrect number of services",
-        1, testService.getServices().size());
+                 1, testService.getServices().size());
+  }
+
+  @Test(timeout = 1000)
+  public void testAddInitedSiblingInInit() throws Throwable {
+    CompositeService parent = new CompositeService("parent");
+    BreakableService sibling = new BreakableService();
+    sibling.init(new Configuration());
+    parent.addService(new AddSiblingService(parent,
+                                            sibling,
+                                            STATE.INITED));
+    parent.init(new Configuration());
+    parent.start();
+    parent.stop();
+    assertEquals("Incorrect number of services",
+                 2, parent.getServices().size());
+  }
+
+  @Test(timeout = 1000)
+  public void testAddUninitedSiblingInInit() throws Throwable {
+    CompositeService parent = new CompositeService("parent");
+    BreakableService sibling = new BreakableService();
+    parent.addService(new AddSiblingService(parent,
+                                            sibling,
+                                            STATE.INITED));
+    parent.init(new Configuration());
+    try {
+      parent.start();
+      fail("Expected an exception, got " + parent);
+    } catch (ServiceStateException e) {
+      //expected
+    }
+    parent.stop();
+    assertEquals("Incorrect number of services",
+                 2, parent.getServices().size());
   }
 
   @Test
@@ -365,6 +393,105 @@ public class TestCompositeService {
         2, testService.getServices().size());
   }
 
+  @Test(timeout = 1000)
+  public void testAddStartedChildBeforeInit() throws Throwable {
+    CompositeService parent = new CompositeService("parent");
+    BreakableService child = new BreakableService();
+    child.init(new Configuration());
+    child.start();
+    AddSiblingService.addChildToService(parent, child);
+    try {
+      parent.init(new Configuration());
+      fail("Expected an exception, got " + parent);
+    } catch (ServiceStateException e) {
+      //expected
+    }
+    parent.stop();
+  }
+
+  @Test(timeout = 1000)
+  public void testAddStoppedChildBeforeInit() throws Throwable {
+    CompositeService parent = new CompositeService("parent");
+    BreakableService child = new BreakableService();
+    child.init(new Configuration());
+    child.start();
+    child.stop();
+    AddSiblingService.addChildToService(parent, child);
+    try {
+      parent.init(new Configuration());
+      fail("Expected an exception, got " + parent);
+    } catch (ServiceStateException e) {
+      //expected
+    }
+    parent.stop();
+  }
+
+  @Test(timeout = 1000)
+  public void testAddStartedSiblingInStart() throws Throwable {
+    CompositeService parent = new CompositeService("parent");
+    BreakableService sibling = new BreakableService();
+    sibling.init(new Configuration());
+    sibling.start();
+    parent.addService(new AddSiblingService(parent,
+                                            sibling,
+                                            STATE.STARTED));
+    parent.init(new Configuration());
+    parent.start();
+    parent.stop();
+    assertEquals("Incorrect number of services",
+                 2, parent.getServices().size());
+  }
+
+  @Test(timeout = 1000)
+  public void testAddUninitedSiblingInStart() throws Throwable {
+    CompositeService parent = new CompositeService("parent");
+    BreakableService sibling = new BreakableService();
+    parent.addService(new AddSiblingService(parent,
+                                            sibling,
+                                            STATE.STARTED));
+    parent.init(new Configuration());
+    assertInState(STATE.NOTINITED, sibling);
+    parent.start();
+    parent.stop();
+    assertEquals("Incorrect number of services",
+                 2, parent.getServices().size());
+  }
+
+  @Test(timeout = 1000)
+  public void testAddStartedSiblingInInit() throws Throwable {
+    CompositeService parent = new CompositeService("parent");
+    BreakableService sibling = new BreakableService();
+    sibling.init(new Configuration());
+    sibling.start();
+    parent.addService(new AddSiblingService(parent,
+                                            sibling,
+                                            STATE.INITED));
+    parent.init(new Configuration());
+    assertInState(STATE.STARTED, sibling);
+    parent.start();
+    assertInState(STATE.STARTED, sibling);
+    parent.stop();
+    assertEquals("Incorrect number of services",
+                 2, parent.getServices().size());
+    assertInState(STATE.STOPPED, sibling);
+  }
+
+  @Test(timeout = 1000)
+  public void testAddStartedSiblingInStop() throws Throwable {
+    CompositeService parent = new CompositeService("parent");
+    BreakableService sibling = new BreakableService();
+    sibling.init(new Configuration());
+    sibling.start();
+    parent.addService(new AddSiblingService(parent,
+                                            sibling,
+                                            STATE.STOPPED));
+    parent.init(new Configuration());
+    parent.start();
+    parent.stop();
+    assertEquals("Incorrect number of services",
+                 2, parent.getServices().size());
+  }
+
   public static class CompositeServiceAddingAChild extends CompositeService{
     Service child;
 
@@ -379,7 +506,18 @@ public class TestCompositeService {
       super.serviceInit(conf);
     }
   }
-  
+
+  public static class ServiceTestRuntimeException extends RuntimeException {
+    public ServiceTestRuntimeException(String message) {
+      super(message);
+    }
+  }
+
+  /**
+   * This is a composite service that keeps a count of the number of lifecycle
+   * events called, and can be set to throw a {@link ServiceTestRuntimeException }
+   * during service start or stop
+   */
   public static class CompositeServiceImpl extends CompositeService {
 
     public static boolean isPolicyToStopOnlyStartedServices() {
@@ -408,7 +546,7 @@ public class TestCompositeService {
     @Override
     protected void serviceStart() throws Exception {
       if (throwExceptionOnStart) {
-        throw new YarnRuntimeException("Fake service start exception");
+        throw new ServiceTestRuntimeException("Fake service start exception");
       }
       counter++;
       callSequenceNumber = counter;
@@ -420,7 +558,7 @@ public class TestCompositeService {
       counter++;
       callSequenceNumber = counter;
       if (throwExceptionOnStop) {
-        throw new YarnRuntimeException("Fake service stop exception");
+        throw new ServiceTestRuntimeException("Fake service stop exception");
       }
       super.serviceStop();
     }
@@ -457,6 +595,9 @@ public class TestCompositeService {
 
   }
 
+  /**
+   * Composite service that makes the addService method public to all
+   */
   public static class ServiceManager extends CompositeService {
 
     public void addTestService(CompositeService service) {
@@ -468,4 +609,55 @@ public class TestCompositeService {
     }
   }
 
+  public static class AddSiblingService extends CompositeService {
+    private final CompositeService parent;
+    private final Service serviceToAdd;
+    private STATE triggerState;
+
+    public AddSiblingService(CompositeService parent,
+                             Service serviceToAdd,
+                             STATE triggerState) {
+      super("ParentStateManipulatorService");
+      this.parent = parent;
+      this.serviceToAdd = serviceToAdd;
+      this.triggerState = triggerState;
+    }
+
+    /**
+     * Add the serviceToAdd to the parent if this service
+     * is in the state requested
+     */
+    private void maybeAddSibling() {
+      if (getServiceState() == triggerState) {
+        parent.addService(serviceToAdd);
+      }
+    }
+
+    @Override
+    protected void serviceInit(Configuration conf) throws Exception {
+      maybeAddSibling();
+      super.serviceInit(conf);
+    }
+
+    @Override
+    protected void serviceStart() throws Exception {
+      maybeAddSibling();
+      super.serviceStart();
+    }
+
+    @Override
+    protected void serviceStop() throws Exception {
+      maybeAddSibling();
+      super.serviceStop();
+    }
+
+    /**
+     * Expose addService method
+     * @param parent parent service
+     * @param child child to add
+     */
+    public static void addChildToService(CompositeService parent, Service child) {
+      parent.addService(child);
+    }
+  }
 }

+ 30 - 22
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java

@@ -50,14 +50,6 @@ public class IdUserGroup {
   private BiMap<Integer, String> gidNameMap = HashBiMap.create();
 
   private long lastUpdateTime = 0; // Last time maps were updated
-
-  static public class DuplicateNameOrIdException extends IOException {
-    private static final long serialVersionUID = 1L;
-
-    public DuplicateNameOrIdException(String msg) {
-      super(msg);
-    }
-  }
   
   public IdUserGroup() throws IOException {
     updateMaps();
@@ -80,7 +72,8 @@ public class IdUserGroup {
     }
   }
 
-  private static final String DUPLICATE_NAME_ID_DEBUG_INFO = "NFS gateway can't start with duplicate name or id on the host system.\n"
+  private static final String DUPLICATE_NAME_ID_DEBUG_INFO =
+      "NFS gateway could have problem starting with duplicate name or id on the host system.\n"
       + "This is because HDFS (non-kerberos cluster) uses name as the only way to identify a user or group.\n"
       + "The host system with duplicated user/group name or id might work fine most of the time by itself.\n"
       + "However when NFS gateway talks to HDFS, HDFS accepts only user and group name.\n"
@@ -88,6 +81,16 @@ public class IdUserGroup {
       + "<getent passwd | cut -d: -f1,3> and <getent group | cut -d: -f1,3> on Linux systms,\n"
       + "<dscl . -list /Users UniqueID> and <dscl . -list /Groups PrimaryGroupID> on MacOS.";
   
+  private static void reportDuplicateEntry(final String header,
+      final Integer key, final String value,
+      final Integer ekey, final String evalue) {    
+      LOG.warn("\n" + header + String.format(
+          "new entry (%d, %s), existing entry: (%d, %s).\n%s\n%s",
+          key, value, ekey, evalue,
+          "The new entry is to be ignored for the following reason.",
+          DUPLICATE_NAME_ID_DEBUG_INFO));
+  }
+      
   /**
    * Get the whole list of users and groups and save them in the maps.
    * @throws IOException 
@@ -108,22 +111,27 @@ public class IdUserGroup {
         }
         LOG.debug("add to " + mapName + "map:" + nameId[0] + " id:" + nameId[1]);
         // HDFS can't differentiate duplicate names with simple authentication
-        Integer key = Integer.valueOf(nameId[1]);
-        String value = nameId[0];
+        final Integer key = Integer.valueOf(nameId[1]);
+        final String value = nameId[0];        
         if (map.containsKey(key)) {
-          LOG.error(String.format(
-              "Got duplicate id:(%d, %s), existing entry: (%d, %s).\n%s", key,
-              value, key, map.get(key), DUPLICATE_NAME_ID_DEBUG_INFO));
-          throw new DuplicateNameOrIdException("Got duplicate id.");
+          final String prevValue = map.get(key);
+          if (value.equals(prevValue)) {
+            // silently ignore equivalent entries
+            continue;
+          }
+          reportDuplicateEntry(
+              "Got multiple names associated with the same id: ",
+              key, value, key, prevValue);           
+          continue;
         }
-        if (map.containsValue(nameId[0])) {
-          LOG.error(String.format(
-              "Got duplicate name:(%d, %s), existing entry: (%d, %s) \n%s",
-              key, value, map.inverse().get(value), value,
-              DUPLICATE_NAME_ID_DEBUG_INFO));
-          throw new DuplicateNameOrIdException("Got duplicate name");
+        if (map.containsValue(value)) {
+          final Integer prevKey = map.inverse().get(value);
+          reportDuplicateEntry(
+              "Got multiple ids associated with the same name: ",
+              key, value, prevKey, value);
+          continue;
         }
-        map.put(Integer.valueOf(nameId[1]), nameId[0]);
+        map.put(key, value);
       }
       LOG.info("Updated " + mapName + " map size:" + map.size());
       

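The rewritten loop no longer aborts on duplicates in the passwd/group output: an entry whose (id, name) pair already exists is silently skipped, while a conflicting id or name is logged via reportDuplicateEntry and ignored, keeping the first mapping. A simplified sketch of that policy, with plain JDK maps standing in for the Guava BiMap used in IdUserGroup:

import java.util.HashMap;
import java.util.Map;

// Simplified sketch of the duplicate-tolerant id<->name update policy from the hunk above.
public class DuplicateTolerantMapSketch {
  public static void main(String[] args) {
    String[] passwdLines = {
        "root:0", "hdfs:11501", "hdfs:11502", "hdfs1:11501", "bin:2", "bin:1"
    };
    Map<Integer, String> idToName = new HashMap<>();
    Map<String, Integer> nameToId = new HashMap<>();

    for (String line : passwdLines) {
      String[] nameId = line.split(":");
      String name = nameId[0];
      Integer id = Integer.valueOf(nameId[1]);

      if (name.equals(idToName.get(id))) {
        continue;                                // identical entry: silently ignore
      }
      if (idToName.containsKey(id)) {
        System.out.println("multiple names for id " + id + ": keeping "
            + idToName.get(id) + ", ignoring " + name);   // same id, different name
        continue;
      }
      if (nameToId.containsKey(name)) {
        System.out.println("multiple ids for name " + name + ": keeping "
            + nameToId.get(name) + ", ignoring " + id);   // same name, different id
        continue;
      }
      idToName.put(id, name);
      nameToId.put(name, id);
    }
    // First mappings win: {0=root, 2=bin, 11501=hdfs} (iteration order may vary).
    System.out.println(idToName);
  }
}
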
+ 26 - 15
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/nfs3/TestIdUserGroup.java

@@ -17,11 +17,10 @@
  */
 package org.apache.hadoop.nfs.nfs3;
 
-import static org.junit.Assert.fail;
-
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 
-import org.apache.hadoop.nfs.nfs3.IdUserGroup.DuplicateNameOrIdException;
 import org.junit.Test;
 
 import com.google.common.collect.BiMap;
@@ -33,24 +32,36 @@ public class TestIdUserGroup {
   public void testDuplicates() throws IOException {
     String GET_ALL_USERS_CMD = "echo \"root:x:0:0:root:/root:/bin/bash\n"
         + "hdfs:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
-        + "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\""
+        + "hdfs:x:11502:10788:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+        + "hdfs1:x:11501:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+        + "hdfs2:x:11502:10787:Grid Distributed File System:/home/hdfs:/bin/bash\n"
+        + "bin:x:2:2:bin:/bin:/bin/sh\n"
+        + "bin:x:1:1:bin:/bin:/sbin/nologin\n"
+        + "daemon:x:1:1:daemon:/usr/sbin:/bin/sh\n"
+        + "daemon:x:2:2:daemon:/sbin:/sbin/nologin\""
         + " | cut -d: -f1,3";
     String GET_ALL_GROUPS_CMD = "echo \"hdfs:*:11501:hrt_hdfs\n"
-        + "mapred:x:497\n" + "mapred2:x:497\"" + " | cut -d: -f1,3";
+        + "mapred:x:497\n"
+        + "mapred2:x:497\n"
+        + "mapred:x:498\n" 
+        + "mapred3:x:498\"" 
+        + " | cut -d: -f1,3";
     // Maps for id to name map
     BiMap<Integer, String> uMap = HashBiMap.create();
     BiMap<Integer, String> gMap = HashBiMap.create();
 
-    try {
-      IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":");
-      fail("didn't detect the duplicate name");
-    } catch (DuplicateNameOrIdException e) {
-    }
+    IdUserGroup.updateMapInternal(uMap, "user", GET_ALL_USERS_CMD, ":");
+    assertTrue(uMap.size() == 5);
+    assertEquals(uMap.get(0), "root");
+    assertEquals(uMap.get(11501), "hdfs");
+    assertEquals(uMap.get(11502), "hdfs2");
+    assertEquals(uMap.get(2), "bin");
+    assertEquals(uMap.get(1), "daemon");
     
-    try {
-      IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":");
-      fail("didn't detect the duplicate id");
-    } catch (DuplicateNameOrIdException e) {
-    }
+    IdUserGroup.updateMapInternal(gMap, "group", GET_ALL_GROUPS_CMD, ":");
+    assertTrue(gMap.size() == 3);
+    assertEquals(gMap.get(11501), "hdfs");
+    assertEquals(gMap.get(497), "mapred");
+    assertEquals(gMap.get(498), "mapred3");    
   }
 }

+ 24 - 8
hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestFrameDecoder.java

@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.nio.ByteBuffer;
+import java.util.Random;
 
 import org.apache.hadoop.oncrpc.RpcUtil.RpcFrameDecoder;
 import org.apache.hadoop.oncrpc.security.CredentialsNone;
@@ -31,17 +32,17 @@ import org.jboss.netty.buffer.ByteBufferBackedChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
 import org.jboss.netty.channel.Channel;
+import org.jboss.netty.channel.ChannelException;
 import org.jboss.netty.channel.ChannelHandlerContext;
 import org.junit.Test;
 import org.mockito.Mockito;
 
 public class TestFrameDecoder {
 
-  private static int port = 12345; // some random server port
   private static int resultSize;
 
-  static void testRequest(XDR request) {
-    SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", port, request,
+  static void testRequest(XDR request, int serverPort) {
+    SimpleTcpClient tcpClient = new SimpleTcpClient("localhost", serverPort, request,
         true);
     tcpClient.run();
   }
@@ -148,10 +149,25 @@ public class TestFrameDecoder {
   @Test
   public void testFrames() {
 
-    RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
-        "localhost", port, 100000, 1, 2);
-    SimpleTcpServer tcpServer = new SimpleTcpServer(port, program, 1);
-    tcpServer.run();
+    Random rand = new Random();
+    int serverPort = 30000 + rand.nextInt(10000);
+    int retries = 10;    // A few retries in case initial choice is in use.
+
+    while (true) {
+      try {
+        RpcProgram program = new TestFrameDecoder.TestRpcProgram("TestRpcProgram",
+            "localhost", serverPort, 100000, 1, 2);
+        SimpleTcpServer tcpServer = new SimpleTcpServer(serverPort, program, 1);
+        tcpServer.run();
+        break;          // Successfully bound a port, break out.
+      } catch (ChannelException ce) {
+        if (retries-- > 0) {
+          serverPort += rand.nextInt(20); // Port in use? Try another.
+        } else {
+          throw ce;     // Out of retries.
+        }
+      }
+    }
 
     XDR xdrOut = createGetportMount();
     int headerSize = xdrOut.size();
@@ -161,7 +177,7 @@ public class TestFrameDecoder {
     int requestSize = xdrOut.size() - headerSize;
 
     // Send the request to the server
-    testRequest(xdrOut);
+    testRequest(xdrOut, serverPort);
 
     // Verify the server got the request with right size
     assertEquals(requestSize, resultSize);

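Instead of hard-coding port 12345, the test now picks a random port in the 30000-39999 range and retries a handful of times on ChannelException if the port is taken. The same bind-with-retry idea, sketched with a plain java.net.ServerSocket so it runs without Netty (BindException stands in for Netty's ChannelException; the range and retry count mirror the test but are otherwise arbitrary):

import java.io.IOException;
import java.net.BindException;
import java.net.ServerSocket;
import java.util.Random;

// Sketch of the bind-with-retry pattern the test adopts, using a plain ServerSocket.
public class RandomPortBindSketch {

  static ServerSocket bindWithRetry() throws IOException {
    Random rand = new Random();
    int port = 30000 + rand.nextInt(10000);
    int retries = 10;                      // a few retries in case the initial choice is in use
    while (true) {
      try {
        return new ServerSocket(port);     // throws BindException if the port is busy
      } catch (BindException be) {
        if (retries-- > 0) {
          port += rand.nextInt(20);        // port in use? try another nearby one
        } else {
          throw be;                        // out of retries
        }
      }
    }
  }

  public static void main(String[] args) throws IOException {
    ServerSocket server = bindWithRetry();
    System.out.println("bound to port " + server.getLocalPort());
    server.close();
  }
}
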
+ 33 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -308,6 +308,9 @@ Release 2.4.0 - UNRELEASED
 
     HDFS-5746.  Add ShortCircuitSharedMemorySegment (cmccabe)
 
+    HDFS-4911.  Reduce PeerCache timeout to be commensurate with
+    dfs.datanode.socket.reuse.keepalive (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-5790. LeaseManager.findPath is very slow when many leases need recovery
@@ -324,6 +327,27 @@ Release 2.4.0 - UNRELEASED
     HDFS-5856. DataNode.checkDiskError might throw NPE.
     (Josh Elser via suresh)
 
+    HDFS-5828. BlockPlacementPolicyWithNodeGroup can place multiple replicas on
+    the same node group when dfs.namenode.avoid.write.stale.datanode is true. 
+    (Buddy via junping_du)
+
+    HDFS-5767. NFS implementation assumes userName userId mapping to be unique,
+    which is not true sometimes (Yongjun Zhang via brandonli)
+
+    HDFS-5791. TestHttpsFileSystem should use a random port to avoid binding
+    error during testing (Haohui Mai via brandonli)
+
+    HDFS-5709. Improve NameNode upgrade with existing reserved paths and path
+    components. (Andrew Wang via atm)
+
+    HDFS-5881. Fix skip() of the short-circuit local reader(legacy). (kihwal)
+
+    HDFS-5895. HDFS cacheadmin -listPools has exit_code of 1 when the command
+    returns 0 result. (Tassapol Athiapinya via cnauroth)
+
+    HDFS-5807. TestBalancerWithNodeGroup.testBalancerWithNodeGroup fails
+    intermittently. (Chen He via kihwal)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -853,6 +877,15 @@ Release 2.3.0 - UNRELEASED
     HDFS-5842. Cannot create hftp filesystem when using a proxy user ugi and a doAs 
     on a secure cluster. (jing9)
 
+    HDFS-5399. Revisit SafeModeException and corresponding retry policies.
+    (Jing Zhao via todd)
+
+    HDFS-5876. SecureDataNodeStarter does not pick up configuration in 
+    hdfs-site.xml. (Haohui Mai via jing9)
+
+    HDFS-5873. dfs.http.policy should have higher precedence over dfs.https.enable.
+    (Haohui Mai via jing9)
+
   BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
 
     HDFS-4985. Add storage type to the protocol and expose it in block report

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java

@@ -629,7 +629,7 @@ class BlockReaderLocalLegacy implements BlockReader {
         skipBuf = new byte[bytesPerChecksum];
       }
       int ret = read(skipBuf, 0, (int)(n - remaining));
-      return ret;
+      return (remaining + ret);
     }
   
     // optimize for big gap: discard the current buffer, skip to
@@ -660,9 +660,9 @@ class BlockReaderLocalLegacy implements BlockReader {
     int ret = read(skipBuf, 0, myOffsetFromChunkBoundary);
 
     if (ret == -1) {  // EOS
-      return toskip;
+      return (toskip + remaining);
     } else {
-      return (toskip + ret);
+      return (toskip + remaining + ret);
     }
   }
 

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -36,6 +36,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
@@ -260,6 +262,7 @@ public class DFSClient implements java.io.Closeable {
   public static class Conf {
     final int hdfsTimeout;    // timeout value for a DFS operation.
     final int maxFailoverAttempts;
+    final int maxRetryAttempts;
     final int failoverSleepBaseMillis;
     final int failoverSleepMaxMillis;
     final int maxBlockAcquireFailures;
@@ -305,6 +308,9 @@ public class DFSClient implements java.io.Closeable {
       maxFailoverAttempts = conf.getInt(
           DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
           DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
+      maxRetryAttempts = conf.getInt(
+          DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
+          DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
       failoverSleepBaseMillis = conf.getInt(
           DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
           DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);

+ 6 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -82,9 +82,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int     DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_DEFAULT = 0;
   public static final String  DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY = "dfs.client.failover.connection.retries.on.timeouts";
   public static final int     DFS_CLIENT_FAILOVER_CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
+  public static final String  DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY = "dfs.client.retry.max.attempts";
+  public static final int     DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
   
   public static final String  DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY = "dfs.client.socketcache.expiryMsec";
-  public static final long    DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 2 * 60 * 1000;
+  public static final long    DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT = 3000;
   public static final String  DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL = "dfs.client.write.exclude.nodes.cache.expiry.interval.millis";
   public static final long    DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT = 10 * 60 * 1000; // 10 minutes, in ms
   public static final String  DFS_NAMENODE_BACKUP_ADDRESS_KEY = "dfs.namenode.backup.address";
@@ -213,7 +215,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_SYNCONCLOSE_KEY = "dfs.datanode.synconclose";
   public static final boolean DFS_DATANODE_SYNCONCLOSE_DEFAULT = false;
   public static final String  DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
-  public static final int     DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;
+  public static final int     DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 4000;
 
   public static final String DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_KEY = "dfs.namenode.datanode.registration.ip-hostname-check";
   public static final boolean DFS_NAMENODE_DATANODE_REGISTRATION_IP_HOSTNAME_CHECK_DEFAULT = true;
@@ -576,6 +578,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT = "10000,6,60000,10"; //t1,n1,t2,n2,...
   public static final String  DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY = "dfs.http.client.failover.max.attempts";
   public static final int     DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT = 15;
+  public static final String  DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY = "dfs.http.client.retry.max.attempts";
+  public static final int     DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT = 10;
   public static final String  DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY = "dfs.http.client.failover.sleep.base.millis";
   public static final int     DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT = 500;
   public static final String  DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY = "dfs.http.client.failover.sleep.max.millis";

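The socket cache expiry default above drops from two minutes to three seconds, and the datanode keepalive default rises from 1000 ms to 4000 ms. A hedged sketch of restoring the longer client-side expiry explicitly, should a deployment depend on it (the override value is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class SocketCacheExpiryOverride {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // The default is now 3000 ms per the hunk above; set the key explicitly
    // to restore the previous two-minute expiry (illustrative choice).
    conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, 2 * 60 * 1000L);
    System.out.println(conf.getLong(
        DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
        DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT));
  }
}
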
+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -1345,6 +1345,14 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
           pos += blockReader.skip(diff);
           if (pos == targetPos) {
             done = true;
+          } else {
+            // The range was already checked. If the block reader returns
+            // something unexpected instead of throwing an exception, it is
+            // most likely a bug. 
+            String errMsg = "BlockReader failed to seek to " + 
+                targetPos + ". Instead, it seeked to " + pos + ".";
+            DFSClient.LOG.warn(errMsg);
+            throw new IOException(errMsg);
           }
         } catch (IOException e) {//make following read to retry
           if(DFSClient.LOG.isDebugEnabled()) {

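The pattern added above -- verify that a skip landed on the requested offset and treat anything else as a bug -- can be shown in isolation. A toy sketch with a hypothetical SeekableReader interface (not an HDFS API):

import java.io.IOException;

public class SkipCheckSketch {
  /** Hypothetical reader with BlockReader-like skip semantics. */
  interface SeekableReader {
    long skip(long n) throws IOException;
  }

  static long seekTo(SeekableReader reader, long pos, long targetPos) throws IOException {
    pos += reader.skip(targetPos - pos);
    if (pos != targetPos) {
      // Mirror the hunk: an unexpected landing position is most likely a bug.
      throw new IOException("Reader failed to seek to " + targetPos
          + "; landed at " + pos);
    }
    return pos;
  }

  public static void main(String[] args) throws IOException {
    SeekableReader r = new SeekableReader() {
      @Override public long skip(long n) { return n; } // well-behaved reader
    };
    System.out.println(seekTo(r, 0L, 1024L)); // prints 1024
  }
}
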
+ 80 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -261,6 +261,47 @@ public class DFSUtil {
     return true;
   }
 
+  /**
+   * Checks if a string is a valid path component. For instance, components
+   * cannot contain a ":" or "/", and cannot be equal to a reserved component
+   * like ".snapshot".
+   * <p>
+   * The primary use of this method is for validating paths when loading the
+   * FSImage. During normal NN operation, paths are sometimes allowed to
+   * contain reserved components.
+   * 
+   * @return true if the component is valid
+   * @return true if the component is valid
+   */
+  public static boolean isValidNameForComponent(String component) {
+    if (component.equals(".") ||
+        component.equals("..") ||
+        component.indexOf(":") >= 0 ||
+        component.indexOf("/") >= 0) {
+      return false;
+    }
+    return !isReservedPathComponent(component);
+  }
+
+
+  /**
+   * Returns whether the component is a reserved path component.
+   * Returns whether the component is a reserved path component.
+   * 
+   * <p>
+   * Note that some components are only reserved under certain directories, e.g.
+   * "/.reserved" is reserved, while "/hadoop/.reserved" is not.
+   * 
+   * @param component the path component to check
+   * @return true if the component is reserved
+   */
+  public static boolean isReservedPathComponent(String component) {
+    for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
+      if (component.equals(reserved)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
   /**
    * Converts a byte array to a string using UTF8 encoding.
    */
@@ -312,7 +353,25 @@ public class DFSUtil {
     }
     return result.toString();
   }
-  
+
+  /**
+   * Converts a list of path components into a path using Path.SEPARATOR.
+   * 
+   * @param components Path components
+   * @return Combined path as a UTF-8 string
+   */
+  public static String strings2PathString(String[] components) {
+    if (components.length == 0) {
+      return "";
+    }
+    if (components.length == 1) {
+      if (components[0] == null || components[0].isEmpty()) {
+        return Path.SEPARATOR;
+      }
+    }
+    return Joiner.on(Path.SEPARATOR).join(components);
+  }
+
   /**
    * Given a list of path components returns a byte array
    */
@@ -1508,31 +1567,34 @@ public class DFSUtil {
    * configuration settings.
    */
   public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
-    String httpPolicy = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY,
-        DFSConfigKeys.DFS_HTTP_POLICY_DEFAULT);
-
-    HttpConfig.Policy policy = HttpConfig.Policy.fromString(httpPolicy);
-
-    if (policy == HttpConfig.Policy.HTTP_ONLY) {
-      boolean httpsEnabled = conf.getBoolean(
-          DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
+    String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY);
+    if (policyStr == null) {
+      boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
           DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);
 
-      boolean hadoopSslEnabled = conf.getBoolean(
+      boolean hadoopSsl = conf.getBoolean(
           CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
           CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);
 
-      if (hadoopSslEnabled) {
+      if (hadoopSsl) {
         LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
-            + " is deprecated. Please use "
-            + DFSConfigKeys.DFS_HTTPS_ENABLE_KEY + ".");
-        policy = HttpConfig.Policy.HTTPS_ONLY;
-      } else if (httpsEnabled) {
+            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
+            + ".");
+      }
+      if (https) {
         LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
-            + " is deprecated. Please use "
-            + DFSConfigKeys.DFS_HTTPS_ENABLE_KEY + ".");
-        policy = HttpConfig.Policy.HTTP_AND_HTTPS;
+            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
+            + ".");
       }
+
+      return (hadoopSsl || https) ? HttpConfig.Policy.HTTP_AND_HTTPS
+          : HttpConfig.Policy.HTTP_ONLY;
+    }
+
+    HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
+    if (policy == null) {
+      throw new HadoopIllegalArgumentException("Unrecognized value '"
+          + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
     }
 
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());

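A standalone sketch of the component validation the new DFSUtil helpers perform. The RESERVED array mirrors the two entries of HdfsConstants.RESERVED_PATH_COMPONENTS introduced by this change, and the simplified check ignores the caveat that ".reserved" is only reserved directly under the root:

public class PathComponentCheck {
  // Mirrors HdfsConstants.RESERVED_PATH_COMPONENTS in this change.
  private static final String[] RESERVED = { ".snapshot", ".reserved" };

  static boolean isValidNameForComponent(String component) {
    if (component.equals(".") || component.equals("..")
        || component.contains(":") || component.contains("/")) {
      return false;
    }
    for (String r : RESERVED) {
      if (component.equals(r)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(isValidNameForComponent(".snapshot")); // false
    System.out.println(isValidNameForComponent("data"));      // true
  }
}
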
+ 11 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java

@@ -24,6 +24,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;
 
 import java.io.IOException;
 import java.lang.reflect.Constructor;
@@ -144,9 +146,10 @@ public class NameNodeProxies {
           .createFailoverProxyProvider(conf, failoverProxyProviderClass, xface,
               nameNodeUri);
       Conf config = new Conf(conf);
-      T proxy = (T) RetryProxy.create(xface, failoverProxyProvider, RetryPolicies
-          .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
-              config.maxFailoverAttempts, config.failoverSleepBaseMillis,
+      T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
+          RetryPolicies.failoverOnNetworkException(
+              RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts,
+              config.maxRetryAttempts, config.failoverSleepBaseMillis,
               config.failoverSleepMaxMillis));
       
       Text dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
@@ -192,11 +195,14 @@ public class NameNodeProxies {
       int maxFailoverAttempts = config.getInt(
           DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
           DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
+      int maxRetryAttempts = config.getInt(
+          DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
+          DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
       InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
               numResponseToDrop, failoverProxyProvider,
               RetryPolicies.failoverOnNetworkException(
-                  RetryPolicies.TRY_ONCE_THEN_FAIL, 
-                  Math.max(numResponseToDrop + 1, maxFailoverAttempts), delay, 
+                  RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts, 
+                  Math.max(numResponseToDrop + 1, maxRetryAttempts), delay, 
                   maxCap));
       
       T proxy = (T) Proxy.newProxyInstance(

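A rough sketch of building the retry policy with the widened failoverOnNetworkException overload used above; the sleep values of 500 and 15000 ms are placeholders rather than defaults taken from the patch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class FailoverPolicySketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    int maxFailoverAttempts = conf.getInt(
        DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
    int maxRetryAttempts = conf.getInt(
        DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
        DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
    // Overload used in the hunks above:
    // (fallback policy, max failovers, max retries, sleep base ms, sleep max ms).
    RetryPolicy policy = RetryPolicies.failoverOnNetworkException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts, maxRetryAttempts,
        500, 15000);
    System.out.println("Built policy: " + policy);
  }
}
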
+ 18 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/PeerCache.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.io.IOException;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map.Entry;
@@ -25,6 +26,7 @@ import java.util.Map.Entry;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.LinkedListMultimap;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -118,6 +120,11 @@ class PeerCache {
     return instance;
   }
 
+  @VisibleForTesting
+  public static synchronized void setInstance(int c, long e) {
+    instance = new PeerCache(c, e);
+  }
+
   private boolean isDaemonStarted() {
     return (daemon == null)? false: true;
   }
@@ -171,8 +178,17 @@ class PeerCache {
     while (iter.hasNext()) {
       Value candidate = iter.next();
       iter.remove();
-      if (!candidate.getPeer().isClosed()) {
-        return candidate.getPeer();
+      long ageMs = Time.monotonicNow() - candidate.getTime();
+      Peer peer = candidate.getPeer();
+      if (ageMs >= expiryPeriod) {
+        try {
+          peer.close();
+        } catch (IOException e) {
+          LOG.warn("got IOException closing stale peer " + peer +
+                ", which is " + ageMs + " ms old");
+        }
+      } else if (!peer.isClosed()) {
+        return peer;
       }
     }
     return null;

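The cache change above closes entries that have outlived the expiry period instead of handing them back to callers. A minimal, HDFS-independent sketch of that eviction pattern with a hypothetical Entry type:

import java.io.Closeable;
import java.util.Iterator;
import java.util.LinkedList;

public class StaleEntryEvictionSketch {
  /** Hypothetical cached entry carrying a creation timestamp. */
  static class Entry implements Closeable {
    final long createdMs = System.currentTimeMillis();
    boolean closed;
    @Override public void close() { closed = true; }
  }

  /** Return a live, non-expired entry; close and drop anything stale. */
  static Entry take(LinkedList<Entry> cache, long expiryMs) {
    Iterator<Entry> it = cache.iterator();
    while (it.hasNext()) {
      Entry e = it.next();
      it.remove();
      long age = System.currentTimeMillis() - e.createdMs;
      if (age >= expiryMs) {
        e.close();               // stale: close instead of handing it out
      } else if (!e.closed) {
        return e;                // first usable entry wins
      }
    }
    return null;
  }

  public static void main(String[] args) {
    LinkedList<Entry> cache = new LinkedList<Entry>();
    cache.add(new Entry());
    System.out.println(take(cache, 3000) != null); // true: entry is fresh
  }
}
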
+ 11 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java

@@ -25,10 +25,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 
 /************************************
  * Some handy constants
@@ -142,6 +141,16 @@ public class HdfsConstants {
   public static final int DATANODE_LAYOUT_VERSION
       = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
 
+  /**
+   * Path components that are reserved in HDFS.
+   * <p>
+   * .reserved is only reserved under root ("/").
+   */
+  public static final String[] RESERVED_PATH_COMPONENTS = new String[] {
+    HdfsConstants.DOT_SNAPSHOT_DIR,
+    FSDirectory.DOT_RESERVED_STRING
+  };
+
   /**
    * A special path component contained in the path for a snapshot file/dir
    */

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -317,7 +317,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         // We need to additionally exclude the nodes that were added to the 
         // result list in the successful calls to choose*() above.
         for (DatanodeStorageInfo resultStorage : results) {
-          oldExcludedNodes.add(resultStorage.getDatanodeDescriptor());
+          addToExcludedNodes(resultStorage.getDatanodeDescriptor(), oldExcludedNodes);
         }
         // Set numOfReplicas, since it can get out of sync with the result list
         // if the NotEnoughReplicasException was thrown in chooseRandom().

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java

@@ -79,7 +79,8 @@ public final class HdfsServerConstants {
     INITIALIZESHAREDEDITS("-initializeSharedEdits"),
     RECOVER  ("-recover"),
     FORCE("-force"),
-    NONINTERACTIVE("-nonInteractive");
+    NONINTERACTIVE("-nonInteractive"),
+    RENAMERESERVED("-renameReserved");
     
     private final String name;
     

+ 6 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -362,13 +362,13 @@ public class DataNode extends Configured
         .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")));
 
     HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
-    InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
-    String infoHost = infoSocAddr.getHostName();
 
     if (policy.isHttpEnabled()) {
       if (secureResources == null) {
+        InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
         int port = infoSocAddr.getPort();
-        builder.addEndpoint(URI.create("http://" + infoHost + ":" + port));
+        builder.addEndpoint(URI.create("http://"
+            + NetUtils.getHostPortString(infoSocAddr)));
         if (port == 0) {
           builder.setFindPort(true);
         }
@@ -381,7 +381,7 @@ public class DataNode extends Configured
 
     if (policy.isHttpsEnabled()) {
       InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf.get(
-          DFS_DATANODE_HTTPS_ADDRESS_KEY, infoHost + ":" + 0));
+          DFS_DATANODE_HTTPS_ADDRESS_KEY, DFS_DATANODE_HTTPS_ADDRESS_DEFAULT));
 
       Configuration sslConf = DFSUtil.loadSslConfiguration(conf);
       DFSUtil.loadSslConfToHttpServerBuilder(builder, sslConf);
@@ -390,7 +390,8 @@ public class DataNode extends Configured
       if (port == 0) {
         builder.setFindPort(true);
       }
-      builder.addEndpoint(URI.create("https://" + infoHost + ":" + port));
+      builder.addEndpoint(URI.create("https://"
+          + NetUtils.getHostPortString(secInfoSocAddr)));
     }
 
     this.infoServer = builder.build();

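A hedged sketch of the endpoint selection the hunk performs, using only calls visible above; the bind addresses (0.0.0.0:50075 and 0.0.0.0:50475) are placeholder defaults, not values read from the patch:

import java.net.InetSocketAddress;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.net.NetUtils;

public class DataNodeEndpointSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
    if (policy.isHttpEnabled()) {
      // The hunk now derives the bind host from the configured info address
      // rather than a previously-resolved hostname.
      InetSocketAddress http = NetUtils.createSocketAddr("0.0.0.0:50075");
      System.out.println(URI.create("http://" + NetUtils.getHostPortString(http)));
    }
    if (policy.isHttpsEnabled()) {
      InetSocketAddress https = NetUtils.createSocketAddr("0.0.0.0:50475");
      System.out.println(URI.create("https://" + NetUtils.getHostPortString(https)));
    }
  }
}
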
+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java

@@ -25,6 +25,7 @@ import org.apache.commons.daemon.DaemonContext;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer2;
@@ -62,7 +63,9 @@ public class SecureDataNodeStarter implements Daemon {
   @Override
   public void init(DaemonContext context) throws Exception {
     System.err.println("Initializing secure datanode resources");
-    Configuration conf = new Configuration();
+    // Create a new HdfsConfiguration object to ensure that the configuration in
+    // hdfs-site.xml is picked up.
+    Configuration conf = new HdfsConfiguration();
     
     // Stash command-line arguments for regular datanode
     args = context.getArguments();

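A small sketch of why the switch matters: HdfsConfiguration registers hdfs-default.xml and hdfs-site.xml as default resources, so site-specific datanode settings become visible. The key dfs.datanode.https.address is used here purely as a probe.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class HdfsConfLoadingSketch {
  public static void main(String[] args) {
    // HdfsConfiguration pulls in hdfs-default.xml and hdfs-site.xml; a bare
    // "new Configuration()" in a fresh JVM only sees core-default.xml and
    // core-site.xml, so hdfs-site.xml overrides would be silently ignored.
    Configuration conf = new HdfsConfiguration();
    System.out.println(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY + " = "
        + conf.get(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY));
  }
}
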
+ 91 - 53
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.hdfs.server.namenode.FSImageFormat.renameReservedPathsOnUpgrade;
 import static org.apache.hadoop.util.Time.now;
 
 import java.io.FilterInputStream;
@@ -320,8 +321,10 @@ public class FSEditLogLoader {
     switch (op.opCode) {
     case OP_ADD: {
       AddCloseOp addCloseOp = (AddCloseOp)op;
+      final String path =
+          renameReservedPathsOnUpgrade(addCloseOp.path, logVersion);
       if (FSNamesystem.LOG.isDebugEnabled()) {
-        FSNamesystem.LOG.debug(op.opCode + ": " + addCloseOp.path +
+        FSNamesystem.LOG.debug(op.opCode + ": " + path +
             " numblocks : " + addCloseOp.blocks.length +
             " clientHolder " + addCloseOp.clientName +
             " clientMachine " + addCloseOp.clientMachine);
@@ -332,9 +335,9 @@ public class FSEditLogLoader {
       // 3. OP_ADD to open file for append
 
       // See if the file already exists (persistBlocks call)
-      final INodesInPath iip = fsDir.getLastINodeInPath(addCloseOp.path);
+      final INodesInPath iip = fsDir.getLastINodeInPath(path);
       final INodeFile oldFile = INodeFile.valueOf(
-          iip.getINode(0), addCloseOp.path, true);
+          iip.getINode(0), path, true);
       INodeFile newFile = oldFile;
       if (oldFile == null) { // this is OP_ADD on a new file (case 1)
         // versions > 0 support per file replication
@@ -347,10 +350,10 @@ public class FSEditLogLoader {
         inodeId = getAndUpdateLastInodeId(addCloseOp.inodeId, logVersion,
             lastInodeId);
         newFile = fsDir.unprotectedAddFile(inodeId,
-            addCloseOp.path, addCloseOp.permissions, replication,
+            path, addCloseOp.permissions, replication,
             addCloseOp.mtime, addCloseOp.atime, addCloseOp.blockSize, true,
             addCloseOp.clientName, addCloseOp.clientMachine);
-        fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
+        fsNamesys.leaseManager.addLease(addCloseOp.clientName, path);
 
         // add the op into retry cache if necessary
         if (toAddRetryCache) {
@@ -366,11 +369,11 @@ public class FSEditLogLoader {
             FSNamesystem.LOG.debug("Reopening an already-closed file " +
                 "for append");
           }
-          LocatedBlock lb = fsNamesys.prepareFileForWrite(addCloseOp.path,
+          LocatedBlock lb = fsNamesys.prepareFileForWrite(path,
               oldFile, addCloseOp.clientName, addCloseOp.clientMachine, null,
               false, iip.getLatestSnapshotId(), false);
-          newFile = INodeFile.valueOf(fsDir.getINode(addCloseOp.path),
-              addCloseOp.path, true);
+          newFile = INodeFile.valueOf(fsDir.getINode(path),
+              path, true);
           
           // add the op into retry cache is necessary
           if (toAddRetryCache) {
@@ -391,16 +394,17 @@ public class FSEditLogLoader {
     }
     case OP_CLOSE: {
       AddCloseOp addCloseOp = (AddCloseOp)op;
-      
+      final String path =
+          renameReservedPathsOnUpgrade(addCloseOp.path, logVersion);
       if (FSNamesystem.LOG.isDebugEnabled()) {
-        FSNamesystem.LOG.debug(op.opCode + ": " + addCloseOp.path +
+        FSNamesystem.LOG.debug(op.opCode + ": " + path +
             " numblocks : " + addCloseOp.blocks.length +
             " clientHolder " + addCloseOp.clientName +
             " clientMachine " + addCloseOp.clientMachine);
       }
 
-      final INodesInPath iip = fsDir.getLastINodeInPath(addCloseOp.path);
-      final INodeFile file = INodeFile.valueOf(iip.getINode(0), addCloseOp.path);
+      final INodesInPath iip = fsDir.getLastINodeInPath(path);
+      final INodeFile file = INodeFile.valueOf(iip.getINode(0), path);
 
       // Update the salient file attributes.
       file.setAccessTime(addCloseOp.atime, Snapshot.CURRENT_STATE_ID);
@@ -414,24 +418,26 @@ public class FSEditLogLoader {
         // could show up twice in a row. But after that version, this
         // should be fixed, so we should treat it as an error.
         throw new IOException(
-            "File is not under construction: " + addCloseOp.path);
+            "File is not under construction: " + path);
       }
       // One might expect that you could use removeLease(holder, path) here,
       // but OP_CLOSE doesn't serialize the holder. So, remove by path.
       if (file.isUnderConstruction()) {
-        fsNamesys.leaseManager.removeLeaseWithPrefixPath(addCloseOp.path);
+        fsNamesys.leaseManager.removeLeaseWithPrefixPath(path);
         file.toCompleteFile(file.getModificationTime());
       }
       break;
     }
     case OP_UPDATE_BLOCKS: {
       UpdateBlocksOp updateOp = (UpdateBlocksOp)op;
+      final String path =
+          renameReservedPathsOnUpgrade(updateOp.path, logVersion);
       if (FSNamesystem.LOG.isDebugEnabled()) {
-        FSNamesystem.LOG.debug(op.opCode + ": " + updateOp.path +
+        FSNamesystem.LOG.debug(op.opCode + ": " + path +
             " numblocks : " + updateOp.blocks.length);
       }
-      INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(updateOp.path),
-          updateOp.path);
+      INodeFile oldFile = INodeFile.valueOf(fsDir.getINode(path),
+          path);
       // Update in-memory data structures
       updateBlocks(fsDir, updateOp, oldFile);
       
@@ -442,7 +448,7 @@ public class FSEditLogLoader {
     }
     case OP_ADD_BLOCK: {
       AddBlockOp addBlockOp = (AddBlockOp) op;
-      String path = addBlockOp.getPath();
+      String path = renameReservedPathsOnUpgrade(addBlockOp.getPath(), logVersion);
       if (FSNamesystem.LOG.isDebugEnabled()) {
         FSNamesystem.LOG.debug(op.opCode + ": " + path +
             " new block id : " + addBlockOp.getLastBlock().getBlockId());
@@ -456,14 +462,20 @@ public class FSEditLogLoader {
       SetReplicationOp setReplicationOp = (SetReplicationOp)op;
       short replication = fsNamesys.getBlockManager().adjustReplication(
           setReplicationOp.replication);
-      fsDir.unprotectedSetReplication(setReplicationOp.path,
+      fsDir.unprotectedSetReplication(
+          renameReservedPathsOnUpgrade(setReplicationOp.path, logVersion),
                                       replication, null);
       break;
     }
     case OP_CONCAT_DELETE: {
       ConcatDeleteOp concatDeleteOp = (ConcatDeleteOp)op;
-      fsDir.unprotectedConcat(concatDeleteOp.trg, concatDeleteOp.srcs,
-          concatDeleteOp.timestamp);
+      String trg = renameReservedPathsOnUpgrade(concatDeleteOp.trg, logVersion);
+      String[] srcs = new String[concatDeleteOp.srcs.length];
+      for (int i=0; i<srcs.length; i++) {
+        srcs[i] =
+            renameReservedPathsOnUpgrade(concatDeleteOp.srcs[i], logVersion);
+      }
+      fsDir.unprotectedConcat(trg, srcs, concatDeleteOp.timestamp);
       
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(concatDeleteOp.rpcClientId,
@@ -473,7 +485,9 @@ public class FSEditLogLoader {
     }
     case OP_RENAME_OLD: {
       RenameOldOp renameOp = (RenameOldOp)op;
-      fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
+      final String src = renameReservedPathsOnUpgrade(renameOp.src, logVersion);
+      final String dst = renameReservedPathsOnUpgrade(renameOp.dst, logVersion);
+      fsDir.unprotectedRenameTo(src, dst,
                                 renameOp.timestamp);
       
       if (toAddRetryCache) {
@@ -483,7 +497,9 @@ public class FSEditLogLoader {
     }
     case OP_DELETE: {
       DeleteOp deleteOp = (DeleteOp)op;
-      fsDir.unprotectedDelete(deleteOp.path, deleteOp.timestamp);
+      fsDir.unprotectedDelete(
+          renameReservedPathsOnUpgrade(deleteOp.path, logVersion),
+          deleteOp.timestamp);
       
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(deleteOp.rpcClientId, deleteOp.rpcCallId);
@@ -494,8 +510,9 @@ public class FSEditLogLoader {
       MkdirOp mkdirOp = (MkdirOp)op;
       inodeId = getAndUpdateLastInodeId(mkdirOp.inodeId, logVersion,
           lastInodeId);
-      fsDir.unprotectedMkdir(inodeId, mkdirOp.path, mkdirOp.permissions,
-                             mkdirOp.timestamp);
+      fsDir.unprotectedMkdir(inodeId,
+          renameReservedPathsOnUpgrade(mkdirOp.path, logVersion),
+          mkdirOp.permissions, mkdirOp.timestamp);
       break;
     }
     case OP_SET_GENSTAMP_V1: {
@@ -505,53 +522,56 @@ public class FSEditLogLoader {
     }
     case OP_SET_PERMISSIONS: {
       SetPermissionsOp setPermissionsOp = (SetPermissionsOp)op;
-      fsDir.unprotectedSetPermission(setPermissionsOp.src,
-                                     setPermissionsOp.permissions);
+      fsDir.unprotectedSetPermission(
+          renameReservedPathsOnUpgrade(setPermissionsOp.src, logVersion),
+          setPermissionsOp.permissions);
       break;
     }
     case OP_SET_OWNER: {
       SetOwnerOp setOwnerOp = (SetOwnerOp)op;
-      fsDir.unprotectedSetOwner(setOwnerOp.src, setOwnerOp.username,
-                                setOwnerOp.groupname);
+      fsDir.unprotectedSetOwner(
+          renameReservedPathsOnUpgrade(setOwnerOp.src, logVersion),
+          setOwnerOp.username, setOwnerOp.groupname);
       break;
     }
     case OP_SET_NS_QUOTA: {
       SetNSQuotaOp setNSQuotaOp = (SetNSQuotaOp)op;
-      fsDir.unprotectedSetQuota(setNSQuotaOp.src,
-                                setNSQuotaOp.nsQuota,
-                                HdfsConstants.QUOTA_DONT_SET);
+      fsDir.unprotectedSetQuota(
+          renameReservedPathsOnUpgrade(setNSQuotaOp.src, logVersion),
+          setNSQuotaOp.nsQuota, HdfsConstants.QUOTA_DONT_SET);
       break;
     }
     case OP_CLEAR_NS_QUOTA: {
       ClearNSQuotaOp clearNSQuotaOp = (ClearNSQuotaOp)op;
-      fsDir.unprotectedSetQuota(clearNSQuotaOp.src,
-                                HdfsConstants.QUOTA_RESET,
-                                HdfsConstants.QUOTA_DONT_SET);
+      fsDir.unprotectedSetQuota(
+          renameReservedPathsOnUpgrade(clearNSQuotaOp.src, logVersion),
+          HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
       break;
     }
 
     case OP_SET_QUOTA:
       SetQuotaOp setQuotaOp = (SetQuotaOp)op;
-      fsDir.unprotectedSetQuota(setQuotaOp.src,
-                                setQuotaOp.nsQuota,
-                                setQuotaOp.dsQuota);
+      fsDir.unprotectedSetQuota(
+          renameReservedPathsOnUpgrade(setQuotaOp.src, logVersion),
+          setQuotaOp.nsQuota, setQuotaOp.dsQuota);
       break;
 
     case OP_TIMES: {
       TimesOp timesOp = (TimesOp)op;
 
-      fsDir.unprotectedSetTimes(timesOp.path,
-                                timesOp.mtime,
-                                timesOp.atime, true);
+      fsDir.unprotectedSetTimes(
+          renameReservedPathsOnUpgrade(timesOp.path, logVersion),
+          timesOp.mtime, timesOp.atime, true);
       break;
     }
     case OP_SYMLINK: {
       SymlinkOp symlinkOp = (SymlinkOp)op;
       inodeId = getAndUpdateLastInodeId(symlinkOp.inodeId, logVersion,
           lastInodeId);
-      fsDir.unprotectedAddSymlink(inodeId, symlinkOp.path,
-                                  symlinkOp.value, symlinkOp.mtime, 
-                                  symlinkOp.atime, symlinkOp.permissionStatus);
+      fsDir.unprotectedAddSymlink(inodeId,
+          renameReservedPathsOnUpgrade(symlinkOp.path, logVersion),
+          symlinkOp.value, symlinkOp.mtime, symlinkOp.atime,
+          symlinkOp.permissionStatus);
       
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(symlinkOp.rpcClientId, symlinkOp.rpcCallId);
@@ -560,8 +580,10 @@ public class FSEditLogLoader {
     }
     case OP_RENAME: {
       RenameOp renameOp = (RenameOp)op;
-      fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
-                                renameOp.timestamp, renameOp.options);
+      fsDir.unprotectedRenameTo(
+          renameReservedPathsOnUpgrade(renameOp.src, logVersion),
+          renameReservedPathsOnUpgrade(renameOp.dst, logVersion),
+          renameOp.timestamp, renameOp.options);
       
       if (toAddRetryCache) {
         fsNamesys.addCacheEntry(renameOp.rpcClientId, renameOp.rpcCallId);
@@ -604,10 +626,12 @@ public class FSEditLogLoader {
 
       Lease lease = fsNamesys.leaseManager.getLease(
           reassignLeaseOp.leaseHolder);
-      INodeFile pendingFile = fsDir.getINode(reassignLeaseOp.path).asFile();
+      final String path =
+          renameReservedPathsOnUpgrade(reassignLeaseOp.path, logVersion);
+      INodeFile pendingFile = fsDir.getINode(path).asFile();
       Preconditions.checkState(pendingFile.isUnderConstruction());
       fsNamesys.reassignLeaseInternal(lease,
-          reassignLeaseOp.path, reassignLeaseOp.newHolder, pendingFile);
+          path, reassignLeaseOp.newHolder, pendingFile);
       break;
     }
     case OP_START_LOG_SEGMENT:
@@ -617,8 +641,11 @@ public class FSEditLogLoader {
     }
     case OP_CREATE_SNAPSHOT: {
       CreateSnapshotOp createSnapshotOp = (CreateSnapshotOp) op;
+      final String snapshotRoot =
+          renameReservedPathsOnUpgrade(createSnapshotOp.snapshotRoot,
+              logVersion);
       String path = fsNamesys.getSnapshotManager().createSnapshot(
-          createSnapshotOp.snapshotRoot, createSnapshotOp.snapshotName);
+          snapshotRoot, createSnapshotOp.snapshotName);
       if (toAddRetryCache) {
         fsNamesys.addCacheEntryWithPayload(createSnapshotOp.rpcClientId,
             createSnapshotOp.rpcCallId, path);
@@ -629,8 +656,11 @@ public class FSEditLogLoader {
       DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op;
       BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
       List<INode> removedINodes = new ChunkedArrayList<INode>();
+      final String snapshotRoot =
+          renameReservedPathsOnUpgrade(deleteSnapshotOp.snapshotRoot,
+              logVersion);
       fsNamesys.getSnapshotManager().deleteSnapshot(
-          deleteSnapshotOp.snapshotRoot, deleteSnapshotOp.snapshotName,
+          snapshotRoot, deleteSnapshotOp.snapshotName,
           collectedBlocks, removedINodes);
       fsNamesys.removeBlocksAndUpdateSafemodeTotal(collectedBlocks);
       collectedBlocks.clear();
@@ -645,8 +675,11 @@ public class FSEditLogLoader {
     }
     case OP_RENAME_SNAPSHOT: {
       RenameSnapshotOp renameSnapshotOp = (RenameSnapshotOp) op;
+      final String snapshotRoot =
+          renameReservedPathsOnUpgrade(renameSnapshotOp.snapshotRoot,
+              logVersion);
       fsNamesys.getSnapshotManager().renameSnapshot(
-          renameSnapshotOp.snapshotRoot, renameSnapshotOp.snapshotOldName,
+          snapshotRoot, renameSnapshotOp.snapshotOldName,
           renameSnapshotOp.snapshotNewName);
       
       if (toAddRetryCache) {
@@ -657,14 +690,19 @@ public class FSEditLogLoader {
     }
     case OP_ALLOW_SNAPSHOT: {
       AllowSnapshotOp allowSnapshotOp = (AllowSnapshotOp) op;
+      final String snapshotRoot =
+          renameReservedPathsOnUpgrade(allowSnapshotOp.snapshotRoot, logVersion);
       fsNamesys.getSnapshotManager().setSnapshottable(
-          allowSnapshotOp.snapshotRoot, false);
+          snapshotRoot, false);
       break;
     }
     case OP_DISALLOW_SNAPSHOT: {
       DisallowSnapshotOp disallowSnapshotOp = (DisallowSnapshotOp) op;
+      final String snapshotRoot =
+          renameReservedPathsOnUpgrade(disallowSnapshotOp.snapshotRoot,
+              logVersion);
       fsNamesys.getSnapshotManager().resetSnapshottable(
-          disallowSnapshotOp.snapshotRoot);
+          snapshotRoot);
       break;
     }
     case OP_SET_GENSTAMP_V2: {

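A simplified, self-contained sketch of the replay-time rewrite applied throughout this hunk: every path carried by an edit op is passed through a rename hook before the op is applied. The rename table below is illustrative, and the sketch ignores the detail that ".reserved" is only reserved directly under the root:

import java.util.Map;
import java.util.TreeMap;

public class RenameOnReplaySketch {
  // Illustrative rename table, in the spirit of FSImageFormat.renameReservedMap.
  private static final Map<String, String> RENAMES = new TreeMap<String, String>();
  static {
    RENAMES.put(".snapshot", ".user-snapshot");
    RENAMES.put(".reserved", ".my-reserved");
  }

  /** Rewrite any reserved component of a slash-separated path. */
  static String renameReserved(String path) {
    String[] parts = path.split("/", -1);
    for (int i = 0; i < parts.length; i++) {
      String replacement = RENAMES.get(parts[i]);
      if (replacement != null) {
        parts[i] = replacement;
      }
    }
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < parts.length; i++) {
      if (i > 0) {
        sb.append('/');
      }
      sb.append(parts[i]);
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    // Every path-bearing edit op is funneled through the hook before replay,
    // e.g. a delete of "/data/.snapshot/x" replayed during an upgrade.
    System.out.println(renameReserved("/data/.snapshot/x"));
  }
}
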
+ 169 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

@@ -32,12 +32,13 @@ import java.security.DigestOutputStream;
 import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.TreeMap;
 
 import org.apache.commons.logging.Log;
-import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -45,13 +46,15 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
@@ -67,6 +70,10 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.util.StringUtils;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 
 /**
  * Contains inner classes for reading or writing the on-disk format for
@@ -411,7 +418,8 @@ public class FSImageFormat {
     }
     
   /** 
-   * load fsimage files assuming only local names are stored
+   * load fsimage files assuming only local names are stored. Used when
+   * snapshots are not supported by the layout version.
    *   
    * @param numFiles number of files expected to be read
    * @param in image input stream
@@ -527,6 +535,8 @@ public class FSImageFormat {
     */
    private int loadDirectory(DataInput in, Counter counter) throws IOException {
      String parentPath = FSImageSerialization.readString(in);
+     // Rename .snapshot paths if we're doing an upgrade
+     parentPath = renameReservedPathsOnUpgrade(parentPath, getLayoutVersion());
      final INodeDirectory parent = INodeDirectory.valueOf(
          namesystem.dir.rootDir.getNode(parentPath, true), parentPath);
      return loadChildren(parent, in, counter);
@@ -586,11 +596,9 @@ public class FSImageFormat {
    */
   private void addToParent(INodeDirectory parent, INode child) {
     FSDirectory fsDir = namesystem.dir;
-    if (parent == fsDir.rootDir && FSDirectory.isReservedName(child)) {
-        throw new HadoopIllegalArgumentException("File name \""
-            + child.getLocalName() + "\" is reserved. Please "
-            + " change the name of the existing file or directory to another "
-            + "name before upgrading to this release.");
+    if (parent == fsDir.rootDir) {
+        child.setLocalName(renameReservedRootComponentOnUpgrade(
+            child.getLocalNameBytes(), getLayoutVersion()));
     }
     // NOTE: This does not update space counts for parents
     if (!parent.addChild(child)) {
@@ -627,7 +635,9 @@ public class FSImageFormat {
     public INode loadINodeWithLocalName(boolean isSnapshotINode,
         DataInput in, boolean updateINodeMap, Counter counter)
         throws IOException {
-      final byte[] localName = FSImageSerialization.readLocalName(in);
+      byte[] localName = FSImageSerialization.readLocalName(in);
+      localName =
+          renameReservedComponentOnUpgrade(localName, getLayoutVersion());
       INode inode = loadINode(localName, isSnapshotINode, in, counter);
       if (updateINodeMap
           && NameNodeLayoutVersion.supports(
@@ -944,7 +954,156 @@ public class FSImageFormat {
       return snapshotMap.get(in.readInt());
     }
   }
-  
+
+  @VisibleForTesting
+  public static TreeMap<String, String> renameReservedMap =
+      new TreeMap<String, String>();
+
+  /**
+   * Install the default key-value pairs used to determine how to rename
+   * reserved paths on upgrade.
+   */
+  @VisibleForTesting
+  public static void useDefaultRenameReservedPairs() {
+    renameReservedMap.clear();
+    for (String key: HdfsConstants.RESERVED_PATH_COMPONENTS) {
+      renameReservedMap.put(
+          key,
+          key + "." + HdfsConstants.NAMENODE_LAYOUT_VERSION + "."
+              + "UPGRADE_RENAMED");
+    }
+  }
+
+  /**
+   * Set the key-value pairs that will be used to determine how to rename
+   * reserved paths on upgrade.
+   */
+  @VisibleForTesting
+  public static void setRenameReservedPairs(String renameReserved) {
+    // Clear and set the default values
+    useDefaultRenameReservedPairs();
+    // Overwrite with provided values
+    setRenameReservedMapInternal(renameReserved);
+  }
+
+  private static void setRenameReservedMapInternal(String renameReserved) {
+    Collection<String> pairs =
+        StringUtils.getTrimmedStringCollection(renameReserved);
+    for (String p : pairs) {
+      String[] pair = StringUtils.split(p, '/', '=');
+      Preconditions.checkArgument(pair.length == 2,
+          "Could not parse key-value pair " + p);
+      String key = pair[0];
+      String value = pair[1];
+      Preconditions.checkArgument(DFSUtil.isReservedPathComponent(key),
+          "Unknown reserved path " + key);
+      Preconditions.checkArgument(DFSUtil.isValidNameForComponent(value),
+          "Invalid rename path for " + key + ": " + value);
+      LOG.info("Will rename reserved path " + key + " to " + value);
+      renameReservedMap.put(key, value);
+    }
+  }
+
+  /**
+   * When upgrading from an old version, the filesystem could contain paths
+   * that are now reserved in the new version (e.g. .snapshot). This renames
+   * these new reserved paths to a user-specified value to avoid collisions
+   * with the reserved name.
+   * 
+   * @param path Old path potentially containing a reserved path
+   * @return New path with reserved path components renamed to user value
+   */
+  static String renameReservedPathsOnUpgrade(String path,
+      final int layoutVersion) {
+    final String oldPath = path;
+    // If any known LVs aren't supported, we're doing an upgrade
+    if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
+      String[] components = INode.getPathNames(path);
+      // Only need to worry about the root directory
+      if (components.length > 1) {
+        components[1] = DFSUtil.bytes2String(
+            renameReservedRootComponentOnUpgrade(
+                DFSUtil.string2Bytes(components[1]),
+                layoutVersion));
+        path = DFSUtil.strings2PathString(components);
+      }
+    }
+    if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
+      String[] components = INode.getPathNames(path);
+      // Special case the root path
+      if (components.length == 0) {
+        return path;
+      }
+      for (int i=0; i<components.length; i++) {
+        components[i] = DFSUtil.bytes2String(
+            renameReservedComponentOnUpgrade(
+                DFSUtil.string2Bytes(components[i]),
+                layoutVersion));
+      }
+      path = DFSUtil.strings2PathString(components);
+    }
+
+    if (!path.equals(oldPath)) {
+      LOG.info("Upgrade process renamed reserved path " + oldPath + " to "
+          + path);
+    }
+    return path;
+  }
+
+  private final static String RESERVED_ERROR_MSG = 
+      FSDirectory.DOT_RESERVED_PATH_PREFIX + " is a reserved path and "
+      + HdfsConstants.DOT_SNAPSHOT_DIR + " is a reserved path component in"
+      + " this version of HDFS. Please rollback and delete or rename"
+      + " this path, or upgrade with the "
+      + StartupOption.RENAMERESERVED.getName()
+      + " [key-value pairs]"
+      + " option to automatically rename these paths during upgrade.";
+
+  /**
+   * Same as {@link #renameReservedPathsOnUpgrade(String, int)}, but for a
+   * single byte array path component.
+   */
+  private static byte[] renameReservedComponentOnUpgrade(byte[] component,
+      final int layoutVersion) {
+    // If the LV doesn't support snapshots, we're doing an upgrade
+    if (!NameNodeLayoutVersion.supports(Feature.SNAPSHOT, layoutVersion)) {
+      if (Arrays.equals(component, HdfsConstants.DOT_SNAPSHOT_DIR_BYTES)) {
+        Preconditions.checkArgument(
+            renameReservedMap != null &&
+            renameReservedMap.containsKey(HdfsConstants.DOT_SNAPSHOT_DIR),
+            RESERVED_ERROR_MSG);
+        component =
+            DFSUtil.string2Bytes(renameReservedMap
+                .get(HdfsConstants.DOT_SNAPSHOT_DIR));
+      }
+    }
+    return component;
+  }
+
+  /**
+   * Same as {@link #renameReservedPathsOnUpgrade(String, int)}, but for a
+   * single byte array path component directly under the root directory.
+   */
+  private static byte[] renameReservedRootComponentOnUpgrade(byte[] component,
+      final int layoutVersion) {
+    // If the LV doesn't support inode IDs, we're doing an upgrade
+    if (!NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, layoutVersion)) {
+      if (Arrays.equals(component, FSDirectory.DOT_RESERVED)) {
+        Preconditions.checkArgument(
+            renameReservedMap != null &&
+            renameReservedMap.containsKey(FSDirectory.DOT_RESERVED_STRING),
+            RESERVED_ERROR_MSG);
+        final String renameString = renameReservedMap
+            .get(FSDirectory.DOT_RESERVED_STRING);
+        component =
+            DFSUtil.string2Bytes(renameString);
+        LOG.info("Renamed root path " + FSDirectory.DOT_RESERVED_STRING
+            + " to " + renameString);
+      }
+    }
+    return component;
+  }
+
   /**
    * A one-shot class responsible for writing an image file.
    * The write() function should be called once, after which the getter

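A rough sketch of the key-value parsing behind setRenameReservedPairs, using plain String.split rather than the StringUtils helpers in the patch; validation against the reserved set is omitted:

import java.util.TreeMap;

public class RenameReservedPairsSketch {
  /** Parse "old1=new1,old2=new2" into a map, as -renameReserved expects. */
  static TreeMap<String, String> parsePairs(String spec) {
    TreeMap<String, String> map = new TreeMap<String, String>();
    for (String p : spec.split(",")) {
      p = p.trim();
      if (p.isEmpty()) {
        continue;
      }
      String[] kv = p.split("=");
      if (kv.length != 2) {
        throw new IllegalArgumentException("Could not parse key-value pair " + p);
      }
      map.put(kv[0], kv[1]);
    }
    return map;
  }

  public static void main(String[] args) {
    System.out.println(parsePairs(".snapshot=.user-snapshot,.reserved=.my-reserved"));
  }
}
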
+ 14 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -1180,7 +1180,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     if (isInSafeMode()) {
       SafeModeException se = new SafeModeException(errorMsg, safeMode);
       if (haEnabled && haContext != null
-          && haContext.getState().getServiceState() == HAServiceState.ACTIVE) {
+          && haContext.getState().getServiceState() == HAServiceState.ACTIVE
+          && shouldRetrySafeMode(this.safeMode)) {
         throw new RetriableException(se);
       } else {
         throw se;
@@ -1188,6 +1189,18 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     }
   }
   
+  /**
+   * We already know that safemode is on. A RetriableException is thrown only
+   * when safemode is neither manual nor caused by low resources.
+   */
+  private boolean shouldRetrySafeMode(SafeModeInfo safeMode) {
+    if (safeMode == null) {
+      return false;
+    } else {
+      return !safeMode.isManual() && !safeMode.areResourcesLow();
+    }
+  }
+  
   public static Collection<URI> getNamespaceDirs(Configuration conf) {
     return getStorageDirs(conf, DFS_NAMENODE_NAME_DIR_KEY);
   }

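The retry decision added above, restated as a standalone sketch with a stand-in SafeModeInfo that holds only the two flags the check consults:

public class SafeModeRetrySketch {
  /** Minimal stand-in for the fields the new check reads. */
  static class SafeModeInfo {
    final boolean manual;
    final boolean resourcesLow;
    SafeModeInfo(boolean manual, boolean resourcesLow) {
      this.manual = manual;
      this.resourcesLow = resourcesLow;
    }
  }

  /** Retry only when safe mode is automatic and not caused by low resources. */
  static boolean shouldRetry(SafeModeInfo safeMode) {
    return safeMode != null && !safeMode.manual && !safeMode.resourcesLow;
  }

  public static void main(String[] args) {
    System.out.println(shouldRetry(new SafeModeInfo(false, false))); // true
    System.out.println(shouldRetry(new SafeModeInfo(true, false)));  // false
  }
}
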
+ 32 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -212,7 +212,9 @@ public class NameNode implements NameNodeStatusMXBean {
       + StartupOption.CLUSTERID.getName() + " cid ] ["
       + StartupOption.FORCE.getName() + "] ["
       + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
-      + StartupOption.UPGRADE.getName() + "] | ["
+      + StartupOption.UPGRADE.getName() + 
+        " [" + StartupOption.CLUSTERID.getName() + " cid]" +
+        " [" + StartupOption.RENAMERESERVED.getName() + "<k-v pairs>] ] | ["
       + StartupOption.ROLLBACK.getName() + "] | ["
       + StartupOption.FINALIZE.getName() + "] | ["
       + StartupOption.IMPORT.getName() + "] | ["
@@ -1056,7 +1058,8 @@ public class NameNode implements NameNodeStatusMXBean {
     out.println(USAGE + "\n");
   }
 
-  private static StartupOption parseArguments(String args[]) {
+  @VisibleForTesting
+  static StartupOption parseArguments(String args[]) {
     int argsLen = (args == null) ? 0 : args.length;
     StartupOption startOpt = StartupOption.REGULAR;
     for(int i=0; i < argsLen; i++) {
@@ -1103,11 +1106,33 @@ public class NameNode implements NameNodeStatusMXBean {
         startOpt = StartupOption.CHECKPOINT;
       } else if (StartupOption.UPGRADE.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.UPGRADE;
-        // might be followed by two args
-        if (i + 2 < argsLen
-            && args[i + 1].equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
-          i += 2;
-          startOpt.setClusterId(args[i]);
+        /* Can be followed by CLUSTERID with a required parameter or
+         * RENAMERESERVED with an optional parameter
+         */
+        while (i + 1 < argsLen) {
+          String flag = args[i + 1];
+          if (flag.equalsIgnoreCase(StartupOption.CLUSTERID.getName())) {
+            if (i + 2 < argsLen) {
+              i += 2;
+              startOpt.setClusterId(args[i]);
+            } else {
+              LOG.fatal("Must specify a valid cluster ID after the "
+                  + StartupOption.CLUSTERID.getName() + " flag");
+              return null;
+            }
+          } else if (flag.equalsIgnoreCase(StartupOption.RENAMERESERVED
+              .getName())) {
+            if (i + 2 < argsLen) {
+              FSImageFormat.setRenameReservedPairs(args[i + 2]);
+              i += 2;
+            } else {
+              FSImageFormat.useDefaultRenameReservedPairs();
+              i += 1;
+            }
+          } else {
+            LOG.fatal("Unknown upgrade flag " + flag);
+            return null;
+          }
         }
       } else if (StartupOption.ROLLINGUPGRADE.getName().equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.ROLLINGUPGRADE;

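A self-contained sketch of the extended -upgrade flag handling: -clusterid requires a value, while -renameReserved takes an optional key-value list and otherwise falls back to the default rename suffix. The argv array is illustrative; the real parsing lives in NameNode.parseArguments().

public class UpgradeFlagParsingSketch {
  public static void main(String[] args) {
    String[] argv = { "-upgrade", "-clusterid", "CID-123",
        "-renameReserved", ".snapshot=.user-snapshot" };
    String clusterId = null;
    String renameSpec = null;
    int i = 0;
    if ("-upgrade".equalsIgnoreCase(argv[i])) {
      while (i + 1 < argv.length) {
        String flag = argv[i + 1];
        if ("-clusterid".equalsIgnoreCase(flag)) {
          if (i + 2 >= argv.length) {
            throw new IllegalArgumentException(
                "Must specify a valid cluster ID after -clusterid");
          }
          clusterId = argv[i + 2];
          i += 2;
        } else if ("-renameReserved".equalsIgnoreCase(flag)) {
          if (i + 2 < argv.length) {
            renameSpec = argv[i + 2];   // explicit key-value pairs
            i += 2;
          } else {
            renameSpec = "<defaults>";  // fall back to the built-in suffix scheme
            i += 1;
          }
        } else {
          throw new IllegalArgumentException("Unknown upgrade flag " + flag);
        }
      }
    }
    System.out.println("clusterId=" + clusterId + " renameReserved=" + renameSpec);
  }
}
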
+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/CacheAdmin.java

@@ -962,9 +962,8 @@ public class CacheAdmin extends Configured implements Tool {
       if (numResults > 0) { 
         System.out.print(listing);
       }
-      // If there are no results, we return 1 (failure exit code);
-      // otherwise we return 0 (success exit code).
-      return (numResults == 0) ? 1 : 0;
+      // If list pools succeed, we return 0 (success exit code)
+      return 0;
     }
   }
 

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java

@@ -188,6 +188,9 @@ public class WebHdfsFileSystem extends FileSystem
       int maxFailoverAttempts = conf.getInt(
           DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
           DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
+      int maxRetryAttempts = conf.getInt(
+          DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
+          DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
       int failoverSleepBaseMillis = conf.getInt(
           DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
           DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
@@ -197,7 +200,7 @@ public class WebHdfsFileSystem extends FileSystem
 
       this.retryPolicy = RetryPolicies
           .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
-              maxFailoverAttempts, failoverSleepBaseMillis,
+              maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis,
               failoverSleepMaxMillis);
     }
 

+ 28 - 1
hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsUserGuide.apt.vm

@@ -435,7 +435,7 @@ HDFS Users Guide
    state it was in before the upgrade. HDFS upgrade is described in more
    detail in {{{http://wiki.apache.org/hadoop/Hadoop_Upgrade}Hadoop Upgrade}}
    Wiki page. HDFS can have one such backup at a time. Before upgrading,
-   administrators need to remove existing backupusing bin/hadoop dfsadmin
+   administrators need to remove existing backup using bin/hadoop dfsadmin
    <<<-finalizeUpgrade>>> command. The following briefly describes the
    typical upgrade procedure:
 
@@ -459,6 +459,33 @@ HDFS Users Guide
 
           * start the cluster with rollback option. (<<<bin/start-dfs.sh -rollback>>>).
 
+    When upgrading to a new version of HDFS, it is necessary to rename or
+    delete any paths that are reserved in the new version of HDFS. If the
+    NameNode encounters a reserved path during upgrade, it will print an
+    error like the following:
+
+    <<< /.reserved is a reserved path and .snapshot is a
+    reserved path component in this version of HDFS. Please rollback and delete
+    or rename this path, or upgrade with the -renameReserved [key-value pairs]
+    option to automatically rename these paths during upgrade.>>>
+
+    Specifying <<<-upgrade -renameReserved [optional key-value pairs]>>> causes
+    the NameNode to automatically rename any reserved paths found during
+    startup. For example, to rename all paths named <<<.snapshot>>> to
+    <<<.my-snapshot>>> and <<<.reserved>>> to <<<.my-reserved>>>, a user would
+    specify <<<-upgrade -renameReserved
+    .snapshot=.my-snapshot,.reserved=.my-reserved>>>.
+
+    If no key-value pairs are specified with <<<-renameReserved>>>, the
+    NameNode will then suffix reserved paths with
+    <<<.<LAYOUT-VERSION>.UPGRADE_RENAMED>>>, e.g.
+    <<<.snapshot.-51.UPGRADE_RENAMED>>>.
+
+    There are some caveats to this renaming process. It's recommended,
+    if possible, to first run <<<hdfs dfsadmin -saveNamespace>>> before upgrading.
+    This is because data inconsistency can result if an edit log operation
+    refers to the destination of an automatically renamed file.
+
 * File Permissions and Security
 
    The file permissions are designed to be similar to file permissions on

+ 14 - 7
hadoop-hdfs-project/hadoop-hdfs/src/site/xdoc/HdfsSnapshots.xml

@@ -20,7 +20,7 @@
   xsi:schemaLocation="http://maven.apache.org/XDOC/2.0 http://maven.apache.org/xsd/xdoc-2.0.xsd">
 
   <properties>
-    <title>HFDS Snapshots</title>
+    <title>HDFS Snapshots</title>
   </properties>
 
   <body>
@@ -99,15 +99,22 @@
     <li>Copying a file from snapshot <code>s0</code>:
       <source>hdfs dfs -cp /foo/.snapshot/s0/bar /tmp</source></li>
   </ul>
-  <p>
-    <b>Note</b> that the name ".snapshot" is now a reserved file name in HDFS
-    so that users cannot create a file/directory with ".snapshot" as the name.
-    If ".snapshot" is used in a previous version of HDFS, it must be renamed before upgrade;
-    otherwise, upgrade will fail. 
-  </p>
   </subsection>
   </section>
 
+  <section name="Upgrading to a version of HDFS with snapshots" id="Upgrade">
+
+  <p>
+    The HDFS snapshot feature introduces a new reserved path name used to
+    interact with snapshots: <tt>.snapshot</tt>. When upgrading from an
+    older version of HDFS, existing paths named <tt>.snapshot</tt> need
+    to first be renamed or deleted to avoid conflicting with the reserved path.
+    See the upgrade section in
+    <a href="HdfsUserGuide.html#Upgrade_and_Rollback">the HDFS user guide</a>
+    for more information.  </p>
+
+  </section>
+
   <section name="Snapshot Operations" id="SnapshotOperations">
   <subsection name="Administrator Operations" id="AdministratorOperations">
   <p>

+ 85 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

@@ -27,6 +27,7 @@ import java.io.File;
 import java.io.FileOutputStream;
 import java.io.FileReader;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.TreeMap;
@@ -43,7 +44,9 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.namenode.FSImageFormat;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Logger;
 import org.junit.Test;
@@ -67,6 +70,7 @@ public class TestDFSUpgradeFromImage {
   private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
   private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
   private static final String HADOOP1_BBW_IMAGE = "hadoop1-bbw.tgz";
+  private static final String HADOOP2_RESERVED_IMAGE = "hadoop-2-reserved.tgz";
 
   private static class ReferenceFileInfo {
     String path;
@@ -320,6 +324,87 @@ public class TestDFSUpgradeFromImage {
       assertEquals("Upgrade did not fail with bad MD5", 1, md5failures);
     }
   }
+
+  /**
+   * Test upgrade from 2.0 image with a variety of .snapshot and .reserved
+   * paths to test renaming on upgrade
+   */
+  @Test
+  public void testUpgradeFromRel2ReservedImage() throws IOException {
+    unpackStorage(HADOOP2_RESERVED_IMAGE);
+    MiniDFSCluster cluster = null;
+    // Try it once without setting the upgrade flag to ensure it fails
+    try {
+      cluster =
+          new MiniDFSCluster.Builder(new Configuration())
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+    } catch (IllegalArgumentException e) {
+      GenericTestUtils.assertExceptionContains(
+          "reserved path component in this version",
+          e);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+    // Try it again with a custom rename string
+    try {
+      FSImageFormat.setRenameReservedPairs(
+          ".snapshot=.user-snapshot," +
+          ".reserved=.my-reserved");
+      cluster =
+          new MiniDFSCluster.Builder(new Configuration())
+              .format(false)
+              .startupOption(StartupOption.UPGRADE)
+              .numDataNodes(0).build();
+      // Make sure the paths were renamed as expected
+      DistributedFileSystem dfs = cluster.getFileSystem();
+      ArrayList<Path> toList = new ArrayList<Path>();
+      ArrayList<String> found = new ArrayList<String>();
+      toList.add(new Path("/"));
+      while (!toList.isEmpty()) {
+        Path p = toList.remove(0);
+        FileStatus[] statuses = dfs.listStatus(p);
+        for (FileStatus status: statuses) {
+          final String path = status.getPath().toUri().getPath();
+          System.out.println("Found path " + path);
+          found.add(path);
+          if (status.isDirectory()) {
+            toList.add(status.getPath());
+          }
+        }
+      }
+      String[] expected = new String[] {
+          "/edits",
+          "/edits/.reserved",
+          "/edits/.user-snapshot",
+          "/edits/.user-snapshot/editsdir",
+          "/edits/.user-snapshot/editsdir/editscontents",
+          "/edits/.user-snapshot/editsdir/editsdir2",
+          "/image",
+          "/image/.reserved",
+          "/image/.user-snapshot",
+          "/image/.user-snapshot/imagedir",
+          "/image/.user-snapshot/imagedir/imagecontents",
+          "/image/.user-snapshot/imagedir/imagedir2",
+          "/.my-reserved",
+          "/.my-reserved/edits-touch",
+          "/.my-reserved/image-touch"
+      };
+
+      for (String s: expected) {
+        assertTrue("Did not find expected path " + s, found.contains(s));
+      }
+      assertEquals("Found an unexpected path while listing filesystem",
+          found.size(), expected.length);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
     
   static void recoverAllLeases(DFSClient dfs, 
       Path path) throws IOException {

+ 86 - 42
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java

@@ -19,16 +19,19 @@ package org.apache.hadoop.hdfs;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import com.google.common.base.Supplier;
+
 import java.io.InputStream;
 import java.io.PrintWriter;
-import java.net.InetSocketAddress;
-import java.net.Socket;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -37,10 +40,8 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -51,10 +52,7 @@ import com.google.common.io.NullOutputStream;
 public class TestDataTransferKeepalive {
   Configuration conf = new HdfsConfiguration();
   private MiniDFSCluster cluster;
-  private FileSystem fs;
-  private InetSocketAddress dnAddr;
   private DataNode dn;
-  private DFSClient dfsClient;
   private static Path TEST_FILE = new Path("/test");
   
   private static final int KEEPALIVE_TIMEOUT = 1000;
@@ -69,15 +67,7 @@ public class TestDataTransferKeepalive {
     
     cluster = new MiniDFSCluster.Builder(conf)
       .numDataNodes(1).build();
-    fs = cluster.getFileSystem();
-    dfsClient = ((DistributedFileSystem)fs).dfs;
-    dfsClient.peerCache.clear();
-
-    String poolId = cluster.getNamesystem().getBlockPoolId();
     dn = cluster.getDataNodes().get(0);
-    DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
-        dn, poolId);
-    dnAddr = NetUtils.createSocketAddr(dnReg.getXferAddr());
   }
   
   @After
@@ -90,34 +80,86 @@ public class TestDataTransferKeepalive {
    * its configured keepalive timeout.
    */
   @Test(timeout=30000)
-  public void testKeepaliveTimeouts() throws Exception {
+  public void testDatanodeRespectsKeepAliveTimeout() throws Exception {
+    Configuration clientConf = new Configuration(conf);
+    // Set a client socket cache expiry time much longer than 
+    // the datanode-side expiration time.
+    final long CLIENT_EXPIRY_MS = 60000L;
+    clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
+    PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT, CLIENT_EXPIRY_MS);
+    DistributedFileSystem fs =
+        (DistributedFileSystem)FileSystem.get(cluster.getURI(),
+            clientConf);
+
     DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
 
     // Clients that write aren't currently re-used.
-    assertEquals(0, dfsClient.peerCache.size());
+    assertEquals(0, fs.dfs.peerCache.size());
     assertXceiverCount(0);
 
     // Reads the file, so we should get a
     // cached socket, and should have an xceiver on the other side.
     DFSTestUtil.readFile(fs, TEST_FILE);
-    assertEquals(1, dfsClient.peerCache.size());
+    assertEquals(1, fs.dfs.peerCache.size());
     assertXceiverCount(1);
 
     // Sleep for a bit longer than the keepalive timeout
     // and make sure the xceiver died.
-    Thread.sleep(KEEPALIVE_TIMEOUT * 2);
+    Thread.sleep(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT + 1);
     assertXceiverCount(0);
     
     // The socket is still in the cache, because we don't
     // notice that it's closed until we try to read
     // from it again.
-    assertEquals(1, dfsClient.peerCache.size());
+    assertEquals(1, fs.dfs.peerCache.size());
     
     // Take it out of the cache - reading should
     // give an EOF.
-    Peer peer = dfsClient.peerCache.get(dn.getDatanodeId(), false);
+    Peer peer = fs.dfs.peerCache.get(dn.getDatanodeId(), false);
     assertNotNull(peer);
     assertEquals(-1, peer.getInputStream().read());
+    PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT,
+        DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
+  }
+
+  /**
+   * Test that the client respects its keepalive timeout.
+   */
+  @Test(timeout=30000)
+  public void testClientResponsesKeepAliveTimeout() throws Exception {
+    Configuration clientConf = new Configuration(conf);
+    // Set a client socket cache expiry time much shorter than 
+    // the datanode-side expiration time.
+    final long CLIENT_EXPIRY_MS = 10L;
+    clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
+    PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT, CLIENT_EXPIRY_MS);
+    DistributedFileSystem fs =
+        (DistributedFileSystem)FileSystem.get(cluster.getURI(),
+            clientConf);
+
+    DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
+
+    // Clients that write aren't currently re-used.
+    assertEquals(0, fs.dfs.peerCache.size());
+    assertXceiverCount(0);
+
+    // Reads the file, so we should get a
+    // cached socket, and should have an xceiver on the other side.
+    DFSTestUtil.readFile(fs, TEST_FILE);
+    assertEquals(1, fs.dfs.peerCache.size());
+    assertXceiverCount(1);
+
+    // Sleep for a bit longer than the client keepalive timeout.
+    Thread.sleep(CLIENT_EXPIRY_MS + 1);
+    
+    // Taking out a peer which is expired should give a null.
+    Peer peer = fs.dfs.peerCache.get(dn.getDatanodeId(), false);
+    assertTrue(peer == null);
+
+    // The socket cache is now empty.
+    assertEquals(0, fs.dfs.peerCache.size());
+    PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT,
+        DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT);
   }
 
   /**
@@ -125,8 +167,17 @@ public class TestDataTransferKeepalive {
    * read bytes off the stream quickly. The datanode should time out sending the
    * chunks and the transceiver should die, even if it has a long keepalive.
    */
-  @Test(timeout=30000)
+  @Test(timeout=300000)
   public void testSlowReader() throws Exception {
+    // Set a client socket cache expiry time much longer than 
+    // the datanode-side expiration time.
+    final long CLIENT_EXPIRY_MS = 600000L;
+    Configuration clientConf = new Configuration(conf);
+    clientConf.setLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY, CLIENT_EXPIRY_MS);
+    PeerCache.setInstance(DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT, CLIENT_EXPIRY_MS);
+    DistributedFileSystem fs =
+        (DistributedFileSystem)FileSystem.get(cluster.getURI(),
+            clientConf);
     // Restart the DN with a shorter write timeout.
     DataNodeProperties props = cluster.stopDataNode(0);
     props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
@@ -134,38 +185,31 @@ public class TestDataTransferKeepalive {
     props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
         120000);
     assertTrue(cluster.restartDataNode(props, true));
+    dn = cluster.getDataNodes().get(0);
     // Wait for heartbeats to avoid a startup race where we
     // try to write the block while the DN is still starting.
     cluster.triggerHeartbeats();
     
-    dn = cluster.getDataNodes().get(0);
-    
     DFSTestUtil.createFile(fs, TEST_FILE, 1024*1024*8L, (short)1, 0L);
     FSDataInputStream stm = fs.open(TEST_FILE);
-    try {
-      stm.read();
-      assertXceiverCount(1);
-
-      // Poll for 0 running xceivers.  Allow up to 5 seconds for some slack.
-      long totalSleepTime = 0;
-      long sleepTime = WRITE_TIMEOUT + 100;
-      while (getXceiverCountWithoutServer() > 0 && totalSleepTime < 5000) {
-        Thread.sleep(sleepTime);
-        totalSleepTime += sleepTime;
-        sleepTime = 100;
+    stm.read();
+    assertXceiverCount(1);
+
+    GenericTestUtils.waitFor(new Supplier<Boolean>() {
+      public Boolean get() {
+        // DN should time out in sendChunks, and this should force
+        // the xceiver to exit.
+        return getXceiverCountWithoutServer() == 0;
       }
+    }, 500, 50000);
 
-      // DN should time out in sendChunks, and this should force
-      // the xceiver to exit.
-      assertXceiverCount(0);
-    } finally {
-      IOUtils.closeStream(stm);
-    }
+    IOUtils.closeStream(stm);
   }
   
   @Test(timeout=30000)
   public void testManyClosedSocketsInCache() throws Exception {
     // Make a small file
+    DistributedFileSystem fs = cluster.getFileSystem();
     DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
 
     // Insert a bunch of dead sockets in the cache, by opening
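
The hunks above replace fixed Thread.sleep() polling with GenericTestUtils.waitFor(). As a reference, a minimal sketch of that pattern, assuming a hypothetical xceiverCountIsZero() predicate; the (supplier, check-interval-ms, timeout-ms) signature is the one the test uses:

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

// Poll every 500 ms and fail with a TimeoutException after 50 s.
GenericTestUtils.waitFor(new Supplier<Boolean>() {
  @Override
  public Boolean get() {
    return xceiverCountIsZero();   // hypothetical predicate, for illustration only
  }
}, 500, 50000);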

+ 54 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHttpPolicy.java

@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.apache.hadoop.http.HttpConfig.Policy.HTTP_AND_HTTPS;
+import static org.apache.hadoop.http.HttpConfig.Policy.HTTP_ONLY;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
+import org.junit.Test;
+
+public final class TestHttpPolicy {
+
+  @Test(expected = HadoopIllegalArgumentException.class)
+  public void testInvalidPolicyValue() {
+    Configuration conf = new Configuration();
+    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "invalid");
+    DFSUtil.getHttpPolicy(conf);
+  }
+
+  @Test
+  public void testDeprecatedConfiguration() {
+    Configuration conf = new Configuration(false);
+    Assert.assertSame(HTTP_ONLY, DFSUtil.getHttpPolicy(conf));
+
+    conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
+    Assert.assertSame(HTTP_AND_HTTPS, DFSUtil.getHttpPolicy(conf));
+
+    conf = new Configuration(false);
+    conf.setBoolean(DFSConfigKeys.HADOOP_SSL_ENABLED_KEY, true);
+    Assert.assertSame(HTTP_AND_HTTPS, DFSUtil.getHttpPolicy(conf));
+
+    conf = new Configuration(false);
+    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HTTP_ONLY.name());
+    conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true);
+    Assert.assertSame(HTTP_ONLY, DFSUtil.getHttpPolicy(conf));
+  }
+}
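
A small sketch of how the resolved policy can be queried, limited to the keys and calls this test already exercises; the HTTPS_ONLY value is just an example:

Configuration conf = new Configuration(false);
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
// Falls back to the deprecated dfs.https.enable / hadoop.ssl.enabled keys
// when dfs.http.policy is not set, as the test above verifies.
HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
boolean httpsOnly = (policy == HttpConfig.Policy.HTTPS_ONLY);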

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java

@@ -65,7 +65,7 @@ public class TestBalancerWithNodeGroup {
 
   ClientProtocol client;
 
-  static final long TIMEOUT = 20000L; //msec
+  static final long TIMEOUT = 40000L; //msec
   static final double CAPACITY_ALLOWED_VARIANCE = 0.005;  // 0.5%
   static final double BALANCE_ALLOWED_VARIANCE = 0.11;    // 10%+delta
   static final int DEFAULT_BLOCK_SIZE = 10;

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java

@@ -124,6 +124,8 @@ public class TestReplicationPolicyWithNodeGroup {
     CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, 
         NetworkTopologyWithNodeGroup.class.getName());
     
+    CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_WRITE_KEY, true);
+    
     File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class);
     
     CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServer.java

@@ -85,6 +85,7 @@ public class TestNameNodeHttpServer {
   @Test
   public void testHttpPolicy() throws Exception {
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
 
     InetSocketAddress addr = InetSocketAddress.createUnresolved("localhost", 0);
     NameNodeHttpServer server = null;
@@ -103,7 +104,9 @@ public class TestNameNodeHttpServer {
           server.getHttpsAddress() == null));
 
     } finally {
-      server.stop();
+      if (server != null) {
+        server.stop();
+      }
     }
   }
 

+ 105 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java

@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.junit.Test;
+
+public class TestNameNodeOptionParsing {
+
+  @Test(timeout = 10000)
+  public void testUpgrade() {
+    StartupOption opt = null;
+    // UPGRADE is set, but nothing else
+    opt = NameNode.parseArguments(new String[] {"-upgrade"});
+    assertEquals(opt, StartupOption.UPGRADE);
+    assertNull(opt.getClusterId());
+    assertTrue(FSImageFormat.renameReservedMap.isEmpty());
+    // cluster ID is set
+    opt = NameNode.parseArguments(new String[] { "-upgrade", "-clusterid",
+        "mycid" });
+    assertEquals(StartupOption.UPGRADE, opt);
+    assertEquals("mycid", opt.getClusterId());
+    assertTrue(FSImageFormat.renameReservedMap.isEmpty());
+    // Everything is set
+    opt = NameNode.parseArguments(new String[] { "-upgrade", "-clusterid",
+        "mycid", "-renameReserved",
+        ".snapshot=.my-snapshot,.reserved=.my-reserved" });
+    assertEquals(StartupOption.UPGRADE, opt);
+    assertEquals("mycid", opt.getClusterId());
+    assertEquals(".my-snapshot",
+        FSImageFormat.renameReservedMap.get(".snapshot"));
+    assertEquals(".my-reserved",
+        FSImageFormat.renameReservedMap.get(".reserved"));
+    // Reset the map
+    FSImageFormat.renameReservedMap.clear();
+    // Everything is set, but in a different order
+    opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
+        ".reserved=.my-reserved,.snapshot=.my-snapshot", "-clusterid",
+        "mycid"});
+    assertEquals(StartupOption.UPGRADE, opt);
+    assertEquals("mycid", opt.getClusterId());
+    assertEquals(".my-snapshot",
+        FSImageFormat.renameReservedMap.get(".snapshot"));
+    assertEquals(".my-reserved",
+        FSImageFormat.renameReservedMap.get(".reserved"));
+    // Try the default renameReserved
+    opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved"});
+    assertEquals(StartupOption.UPGRADE, opt);
+    assertEquals(
+        ".snapshot." + HdfsConstants.NAMENODE_LAYOUT_VERSION
+            + ".UPGRADE_RENAMED",
+        FSImageFormat.renameReservedMap.get(".snapshot"));
+    assertEquals(
+        ".reserved." + HdfsConstants.NAMENODE_LAYOUT_VERSION
+            + ".UPGRADE_RENAMED",
+        FSImageFormat.renameReservedMap.get(".reserved"));
+
+    // Try some error conditions
+    try {
+      opt =
+          NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
+              ".reserved=.my-reserved,.not-reserved=.my-not-reserved" });
+    } catch (IllegalArgumentException e) {
+      assertExceptionContains("Unknown reserved path", e);
+    }
+    try {
+      opt =
+          NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
+              ".reserved=.my-reserved,.snapshot=.snapshot" });
+    } catch (IllegalArgumentException e) {
+      assertExceptionContains("Invalid rename path", e);
+    }
+    try {
+      opt =
+          NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
+              ".snapshot=.reserved" });
+    } catch (IllegalArgumentException e) {
+      assertExceptionContains("Invalid rename path", e);
+    }
+    opt = NameNode.parseArguments(new String[] { "-upgrade", "-cid"});
+    assertNull(opt);
+  }
+
+}

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IOUtils;
@@ -65,6 +66,7 @@ import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
@@ -124,6 +126,9 @@ public class TestHASafeMode {
     final Path test = new Path("/test");
     // let nn0 enter safemode
     NameNodeAdapter.enterSafeMode(nn0, false);
+    SafeModeInfo safeMode = (SafeModeInfo) Whitebox.getInternalState(
+        nn0.getNamesystem(), "safeMode");
+    Whitebox.setInternalState(safeMode, "extension", Integer.valueOf(30000));
     LOG.info("enter safemode");
     new Thread() {
       @Override

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java

@@ -52,6 +52,7 @@ public class TestHttpsFileSystem {
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
     conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
 
     File base = new File(BASEDIR);
     FileUtil.fullyDelete(base);

BIN
hadoop-hdfs-project/hadoop-hdfs/src/test/resources/hadoop-2-reserved.tgz


+ 9 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -153,6 +153,8 @@ Release 2.4.0 - UNRELEASED
     MAPREDUCE-5732. Report proper queue when job has been automatically placed
     (Sandy Ryza)
 
+    MAPREDUCE-5699. Allow setting tags on MR jobs (kasha)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -228,6 +230,10 @@ Release 2.3.0 - UNRELEASED
     MAPREDUCE-5725. Make explicit that TestNetworkedJob relies on the Capacity
     Scheduler (Sandy Ryza)
 
+    MAPREDUCE-5744. Job hangs because 
+    RMContainerAllocator$AssignedRequests.preemptReduce() violates the 
+    comparator contract (Gera Shegalov via kasha)
+
   OPTIMIZATIONS
 
     MAPREDUCE-4680. Job history cleaner should only check timestamps of files in
@@ -347,6 +353,9 @@ Release 2.3.0 - UNRELEASED
     MAPREDUCE-5723. MR AM container log can be truncated or empty.
     (Mohammad Kamrul Islam via kasha)
 
+    MAPREDUCE-5743. Fixed the test failure in TestRMContainerAllocator.
+    (Ted Yu and Vinod Kumar Vavilapalli via zjshen)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

+ 3 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java

@@ -1143,9 +1143,9 @@ public class RMContainerAllocator extends RMContainerRequestor
           new Comparator<TaskAttemptId>() {
         @Override
         public int compare(TaskAttemptId o1, TaskAttemptId o2) {
-          float p = getJob().getTask(o1.getTaskId()).getAttempt(o1).getProgress() -
-              getJob().getTask(o2.getTaskId()).getAttempt(o2).getProgress();
-          return p >= 0 ? 1 : -1;
+          return Float.compare(
+              getJob().getTask(o1.getTaskId()).getAttempt(o1).getProgress(),
+              getJob().getTask(o2.getTaskId()).getAttempt(o2).getProgress());
         }
       });
       
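
The original comparator returned 1 whenever the progress difference was >= 0, so two attempts with equal progress compared as "greater" in both directions, breaking the sgn(compare(x, y)) == -sgn(compare(y, x)) requirement that sorting relies on. A tiny illustration (values invented):

float a = 0.5f, b = 0.5f;                    // equal progress
int broken1 = (a - b) >= 0 ? 1 : -1;         // 1
int broken2 = (b - a) >= 0 ? 1 : -1;         // also 1 -> contract violated
int fixed = Float.compare(a, b);             // 0, antisymmetric as the contract requires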

+ 8 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java

@@ -1652,8 +1652,16 @@ public class TestRMContainerAllocator {
     RMApp app = rm.submitApp(1024);
     dispatcher.await();
 
+    // Make a node to register so as to launch the AM.
+    MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+    amNodeManager.nodeHeartbeat(true);
+    dispatcher.await();
+
     ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
         .getAppAttemptId();
+    rm.sendAMLaunched(appAttemptId);
+    dispatcher.await();
+
     JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
     Job job = mock(Job.class);
     when(job.getReport()).thenReturn(

+ 2 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java

@@ -60,6 +60,8 @@ public interface MRJobConfig {
 
   public static final String QUEUE_NAME = "mapreduce.job.queuename";
 
+  public static final String JOB_TAGS = "mapreduce.job.tags";
+
   public static final String JVM_NUMTASKS_TORUN = "mapreduce.job.jvm.numtasks";
 
   public static final String SPLIT_FILE = "mapreduce.job.splitfile";

+ 9 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml

@@ -1,4 +1,5 @@
 <?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -15,7 +16,6 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 
 <!-- Do not modify this file directly.  Instead, copy entries that you -->
 <!-- wish to modify from this file into mapred-site.xml and change them -->
@@ -727,6 +727,14 @@
   </description>
 </property>
 
+  <property>
+    <name>mapreduce.job.tags</name>
+    <value></value>
+    <description> Tags for the job that will be passed to YARN at submission 
+      time. Queries to YARN for applications can filter on these tags.
+    </description>
+  </property>
+
 <property>
   <name>mapreduce.cluster.local.dir</name>
   <value>${hadoop.tmp.dir}/mapred/local</value>
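
A hedged sketch of setting the new property from job code; MRJobConfig.JOB_TAGS is the constant added above, while the tag values and job name are made up:

Configuration conf = new Configuration();
// Comma-separated list; YARNRunner reads it via getTrimmedStringCollection().
conf.set(MRJobConfig.JOB_TAGS, "nightly,etl");
Job job = Job.getInstance(conf, "tagged-job");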

+ 7 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java

@@ -21,7 +21,9 @@ package org.apache.hadoop.mapred;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Vector;
@@ -467,6 +469,8 @@ public class YARNRunner implements ClientProtocol {
         ContainerLaunchContext.newInstance(localResources, environment,
           vargsFinal, null, securityTokens, acls);
 
+    Collection<String> tagsFromConf =
+        jobConf.getTrimmedStringCollection(MRJobConfig.JOB_TAGS);
 
     // Set up the ApplicationSubmissionContext
     ApplicationSubmissionContext appContext =
@@ -486,6 +490,9 @@ public class YARNRunner implements ClientProtocol {
             MRJobConfig.DEFAULT_MR_AM_MAX_ATTEMPTS));
     appContext.setResource(capability);
     appContext.setApplicationType(MRJobConfig.MR_APPLICATION_TYPE);
+    if (tagsFromConf != null && !tagsFromConf.isEmpty()) {
+      appContext.setApplicationTags(new HashSet<String>(tagsFromConf));
+    }
     return appContext;
   }
 

+ 55 - 0
hadoop-yarn-project/CHANGES.txt

@@ -14,6 +14,9 @@ Trunk - Unreleased
 
     YARN-1504. RM changes for moving apps between queues (Sandy Ryza)
 
+    YARN-1499. Fair Scheduler changes for moving apps between queues (Sandy
+    Ryza)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS
@@ -82,6 +85,24 @@ Release 2.4.0 - UNRELEASED
     YARN-1633. Defined user-facing entity, entity-info and event objects related
     to Application Timeline feature. (Zhijie Shen via vinodkv)
 
+    YARN-1611. Introduced the concept of a configuration provider which can be
+    used by ResourceManager to read configuration locally or from remote systems
+    so as to help RM failover. (Xuan Gong via vinodkv)
+
+    YARN-1659. Defined the ApplicationTimelineStore store as an abstraction for
+    implementing different storage impls for storing timeline information.
+    (Billie Rinaldi via vinodkv)
+
+    YARN-1634. Added a testable in-memory implementation of
+    ApplicationTimelineStore. (Zhijie Shen via vinodkv)
+
+    YARN-1461. Added tags for YARN applications and changed RM to handle them.
+    (Karthik Kambatla via zjshen)
+
+    YARN-1636. Augmented Application-history server's web-services to also expose
+    new APIs for retrieving and storing timeline information. (Zhijie Shen via
+    vinodkv)
+
   IMPROVEMENTS
 
     YARN-1007. Enhance History Reader interface for Containers. (Mayank Bansal via
@@ -99,6 +120,25 @@ Release 2.4.0 - UNRELEASED
     YARN-1617. Remove ancient comment and surround LOG.debug in
     AppSchedulingInfo.allocate (Sandy Ryza)
 
+    YARN-1639. Modified RM HA configuration handling to have a way of not
+    requiring separate configuration files for each RM. (Xuan Gong via vinodkv)
+
+    YARN-1668. Modified RM HA handling of admin-acls to be available across RM
+    failover by making use of a remote configuration-provider. (Xuan Gong via
+    vinodkv)
+
+    YARN-1667. Modified RM HA handling of super users (with proxying ability) to
+    be available across RM failover by making use of a remote
+    configuration-provider. (Xuan Gong via vinodkv)
+
+    YARN-1285. Changed the default value of yarn.acl.enable in yarn-default.xml
+    to be consistent with what exists (false) in the code and documentation.
+    (Kenji Kikushima via vinodkv)
+
+    YARN-1669. Modified RM HA handling of protocol level service-ACLS to
+    be available across RM failover by making use of a remote
+    configuration-provider. (Xuan Gong via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -148,6 +188,12 @@ Release 2.4.0 - UNRELEASED
     YARN-1632. TestApplicationMasterServices should be under
     org.apache.hadoop.yarn.server.resourcemanager package (Chen He via jeagles)
 
+    YARN-1673. Fix option parsing in YARN's application CLI after it is broken
+    by YARN-967. (Mayank Bansal via vinodkv)
+
+    YARN-1684. Fixed history server heap size in yarn script. (Billie Rinaldi
+    via zjshen)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -581,6 +627,15 @@ Release 2.3.0 - UNRELEASED
 
     YARN-1629. IndexOutOfBoundsException in MaxRunningAppsEnforcer (Sandy Ryza)
 
+    YARN-1628. Fixed the test failure in TestContainerManagerSecurity. (Vinod
+    Kumar Vavilapalli via zjshen)
+
+    YARN-1661. Fixed DS ApplicationMaster to write the correct exit log. (Vinod
+    Kumar Vavilapalli via zjshen)
+
+    YARN-1689. Made RMAppAttempt get killed when RMApp is at ACCEPTED. (Vinod
+    Kumar Vavilapalli via zjshen)
+
 Release 2.2.0 - 2013-10-13
 
   INCOMPATIBLE CHANGES

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/bin/yarn

@@ -204,7 +204,7 @@ elif [ "$COMMAND" = "historyserver" ] ; then
   CLASSPATH=${CLASSPATH}:$YARN_CONF_DIR/ahs-config/log4j.properties
   CLASS='org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer'
   YARN_OPTS="$YARN_OPTS $YARN_HISTORYSERVER_OPTS"
-  if [ "$YARN_RESOURCEMANAGER_HEAPSIZE" != "" ]; then
+  if [ "$YARN_HISTORYSERVER_HEAPSIZE" != "" ]; then
     JAVA_HEAP_MAX="-Xmx""$YARN_HISTORYSERVER_HEAPSIZE""m"
   fi  
 elif [ "$COMMAND" = "nodemanager" ] ; then

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd

@@ -207,7 +207,7 @@ goto :eof
   set CLASSPATH=%CLASSPATH%;%YARN_CONF_DIR%\ahs-config\log4j.properties
   set CLASS=org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer
   set YARN_OPTS=%YARN_OPTS% %HADOOP_HISTORYSERVER_OPTS%
-  if defined YARN_RESOURCEMANAGER_HEAPSIZE (
+  if defined YARN_HISTORYSERVER_HEAPSIZE (
     set JAVA_HEAP_MAX=-Xmx%YARN_HISTORYSERVER_HEAPSIZE%m
   )
   goto :eof

+ 38 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/ApplicationsRequestScope.java

@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.protocolrecords;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Enumeration that controls the scope of applications fetched
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public enum ApplicationsRequestScope {
+  /** All jobs */
+  ALL,
+
+  /** Jobs viewable by current user */
+  VIEWABLE,
+
+  /** Jobs owned by current user */
+  OWN
+}

+ 116 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java

@@ -21,7 +21,6 @@ package org.apache.hadoop.yarn.api.protocolrecords;
 import java.util.EnumSet;
 import java.util.Set;
 
-import org.apache.commons.collections.buffer.UnboundedFifoBuffer;
 import org.apache.commons.lang.math.LongRange;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
@@ -49,6 +48,86 @@ public abstract class GetApplicationsRequest {
     return request;
   }
 
+  /**
+   * <p>
+   * The request from clients to get a report of Applications matching the
+   * given filters in the cluster from the
+   * <code>ResourceManager</code>.
+   * </p>
+   *
+   * @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
+   *
+   * <p>Setting any of the parameters to null disables that
+   * filter.</p>
+   *
+   * @param scope {@link ApplicationsRequestScope} to filter by
+   * @param users list of users to filter by
+   * @param queues list of scheduler queues to filter by
+   * @param applicationTypes types of applications
+   * @param applicationTags application tags to filter by
+   * @param applicationStates application states to filter by
+   * @param startRange range of application start times to filter by
+   * @param finishRange range of application finish times to filter by
+   * @param limit number of applications to limit to
+   * @return {@link GetApplicationsRequest} to be used with
+   * {@link ApplicationClientProtocol#getApplications(GetApplicationsRequest)}
+   */
+  @Public
+  @Stable
+  public static GetApplicationsRequest newInstance(
+      ApplicationsRequestScope scope,
+      Set<String> users,
+      Set<String> queues,
+      Set<String> applicationTypes,
+      Set<String> applicationTags,
+      EnumSet<YarnApplicationState> applicationStates,
+      LongRange startRange,
+      LongRange finishRange,
+      Long limit) {
+    GetApplicationsRequest request =
+        Records.newRecord(GetApplicationsRequest.class);
+    if (scope != null) {
+      request.setScope(scope);
+    }
+    request.setUsers(users);
+    request.setQueues(queues);
+    request.setApplicationTypes(applicationTypes);
+    request.setApplicationTags(applicationTags);
+    request.setApplicationStates(applicationStates);
+    if (startRange != null) {
+      request.setStartRange(
+          startRange.getMinimumLong(), startRange.getMaximumLong());
+    }
+    if (finishRange != null) {
+      request.setFinishRange(
+          finishRange.getMinimumLong(), finishRange.getMaximumLong());
+    }
+    if (limit != null) {
+      request.setLimit(limit);
+    }
+    return request;
+  }
+
+  /**
+   * <p>
+   * The request from clients to get a report of Applications matching the
+   * given application scope in the cluster from the
+   * <code>ResourceManager</code>.
+   * </p>
+   *
+   * @param scope {@link ApplicationsRequestScope} to filter by
+   * @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
+   */
+  @Public
+  @Stable
+  public static GetApplicationsRequest newInstance(
+      ApplicationsRequestScope scope) {
+    GetApplicationsRequest request =
+        Records.newRecord(GetApplicationsRequest.class);
+    request.setScope(scope);
+    return request;
+  }
+
   /**
    * <p>
    * The request from clients to get a report of Applications matching the
@@ -257,4 +336,40 @@ public abstract class GetApplicationsRequest {
   @Private
   @Unstable
   public abstract void setFinishRange(long begin, long end);
+
+  /**
+   * Get the tags to filter applications on
+   *
+   * @return list of tags to filter on
+   */
+  @Private
+  @Unstable
+  public abstract Set<String> getApplicationTags();
+
+  /**
+   * Set the list of tags to filter applications on
+   *
+   * @param tags list of tags to filter on
+   */
+  @Private
+  @Unstable
+  public abstract void setApplicationTags(Set<String> tags);
+
+  /**
+   * Get the {@link ApplicationsRequestScope} of applications to be filtered.
+   *
+   * @return {@link ApplicationsRequestScope} of applications to return.
+   */
+  @Private
+  @Unstable
+  public abstract ApplicationsRequestScope getScope();
+
+  /**
+   * Set the {@link ApplicationsRequestScope} of applications to filter.
+   *
+   * @param scope scope to use for filtering applications
+   */
+  @Private
+  @Unstable
+  public abstract void setScope(ApplicationsRequestScope scope);
 }
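
A sketch of the widest newInstance() overload above; passing null leaves a filter disabled, and the concrete filter values here are invented for illustration:

GetApplicationsRequest request = GetApplicationsRequest.newInstance(
    ApplicationsRequestScope.OWN,               // only the caller's apps
    null,                                       // users: filter disabled
    null,                                       // queues: filter disabled
    Collections.singleton("MAPREDUCE"),         // application types
    Collections.singleton("nightly"),           // application tags
    EnumSet.of(YarnApplicationState.RUNNING),   // states
    null,                                       // start-time range
    null,                                       // finish-time range
    10L);                                       // limit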

+ 14 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java

@@ -25,6 +25,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.Set;
+
 /**
  * <p><code>ApplicationReport</code> is a report of an application.</p>
  *
@@ -321,6 +323,18 @@ public abstract class ApplicationReport {
   @Unstable
   public abstract void setApplicationType(String applicationType);
 
+  /**
+   * Get all tags corresponding to the application
+   * @return Application's tags
+   */
+  @Public
+  @Stable
+  public abstract Set<String> getApplicationTags();
+
+  @Private
+  @Unstable
+  public abstract void setApplicationTags(Set<String> tags);
+
   @Private
   @Stable
   public abstract void setAMRMToken(Token amRmToken);

+ 25 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java

@@ -25,8 +25,11 @@ import org.apache.hadoop.classification.InterfaceStability.Stable;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.util.Records;
 
+import java.util.Set;
+
 /**
  * <p><code>ApplicationSubmissionContext</code> represents all of the
  * information needed by the <code>ResourceManager</code> to launch 
@@ -284,7 +287,6 @@ public abstract class ApplicationSubmissionContext {
   @Stable
   public abstract void setApplicationType(String applicationType);
 
-
   /**
    * Get the flag which indicates whether to keep containers across application
    * attempts or not.
@@ -314,4 +316,26 @@ public abstract class ApplicationSubmissionContext {
   @Stable
   public abstract void setKeepContainersAcrossApplicationAttempts(
       boolean keepContainers);
+
+  /**
+   * Get tags for the application
+   *
+   * @return the application tags
+   */
+  @Public
+  @Stable
+  public abstract Set<String> getApplicationTags();
+
+  /**
+   * Set tags for the application. A maximum of
+   * {@link YarnConfiguration#APPLICATION_MAX_TAGS} are allowed
+   * per application. Each tag can be at most
+   * {@link YarnConfiguration#APPLICATION_MAX_TAG_LENGTH}
+   * characters, and can contain only ASCII characters.
+   *
+   * @param tags tags to set
+   */
+  @Public
+  @Stable
+  public abstract void setApplicationTags(Set<String> tags);
 }

+ 109 - 22
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSEntity.java

@@ -22,6 +22,7 @@ import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 
 import javax.xml.bind.annotation.XmlAccessType;
 import javax.xml.bind.annotation.XmlAccessorType;
@@ -49,14 +50,14 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 @XmlAccessorType(XmlAccessType.NONE)
 @Public
 @Unstable
-public class ATSEntity {
+public class ATSEntity implements Comparable<ATSEntity> {
 
   private String entityType;
   private String entityId;
-  private long startTime;
+  private Long startTime;
   private List<ATSEvent> events = new ArrayList<ATSEvent>();
-  private Map<String, List<Object>> relatedEntities =
-      new HashMap<String, List<Object>>();
+  private Map<String, List<String>> relatedEntities =
+      new HashMap<String, List<String>>();
   private Map<String, Object> primaryFilters =
       new HashMap<String, Object>();
   private Map<String, Object> otherInfo =
@@ -112,7 +113,7 @@ public class ATSEntity {
    * @return the start time of the entity
    */
   @XmlElement(name = "starttime")
-  public long getStartTime() {
+  public Long getStartTime() {
     return startTime;
   }
 
@@ -122,7 +123,7 @@ public class ATSEntity {
    * @param startTime
    *          the start time of the entity
    */
-  public void setStartTime(long startTime) {
+  public void setStartTime(Long startTime) {
     this.startTime = startTime;
   }
 
@@ -172,26 +173,25 @@ public class ATSEntity {
    * @return the related entities
    */
   @XmlElement(name = "relatedentities")
-  public Map<String, List<Object>> getRelatedEntities() {
+  public Map<String, List<String>> getRelatedEntities() {
     return relatedEntities;
   }
 
   /**
-   * Add a list of entity of the same type to the existing related entity map
+   * Add an entity to the existing related entity map
    * 
    * @param entityType
    *          the entity type
-   * @param entityIds
-   *          a list of entity Ids
+   * @param entityId
+   *          the entity Id
    */
-  public void addRelatedEntity(String entityType, List<Object> entityIds) {
-    List<Object> thisRelatedEntity = relatedEntities.get(entityType);
-    relatedEntities.put(entityType, entityIds);
+  public void addRelatedEntity(String entityType, String entityId) {
+    List<String> thisRelatedEntity = relatedEntities.get(entityType);
     if (thisRelatedEntity == null) {
-      relatedEntities.put(entityType, entityIds);
-    } else {
-      thisRelatedEntity.addAll(entityIds);
+      thisRelatedEntity = new ArrayList<String>();
+      relatedEntities.put(entityType, thisRelatedEntity);
     }
+    thisRelatedEntity.add(entityId);
   }
 
   /**
@@ -200,11 +200,10 @@ public class ATSEntity {
    * @param relatedEntities
    *          a map of related entities
    */
-  public void addRelatedEntities(
-      Map<String, List<Object>> relatedEntities) {
-    for (Map.Entry<String, List<Object>> relatedEntity : relatedEntities
-        .entrySet()) {
-      List<Object> thisRelatedEntity =
+  public void addRelatedEntities(Map<String, List<String>> relatedEntities) {
+    for (Entry<String, List<String>> relatedEntity :
+        relatedEntities.entrySet()) {
+      List<String> thisRelatedEntity =
           this.relatedEntities.get(relatedEntity.getKey());
       if (thisRelatedEntity == null) {
         this.relatedEntities.put(
@@ -222,7 +221,7 @@ public class ATSEntity {
    *          a map of related entities
    */
   public void setRelatedEntities(
-      Map<String, List<Object>> relatedEntities) {
+      Map<String, List<String>> relatedEntities) {
     this.relatedEntities = relatedEntities;
   }
 
@@ -311,4 +310,92 @@ public class ATSEntity {
     this.otherInfo = otherInfo;
   }
 
+  @Override
+  public int hashCode() {
+    // generated by eclipse
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((entityId == null) ? 0 : entityId.hashCode());
+    result =
+        prime * result + ((entityType == null) ? 0 : entityType.hashCode());
+    result = prime * result + ((events == null) ? 0 : events.hashCode());
+    result = prime * result + ((otherInfo == null) ? 0 : otherInfo.hashCode());
+    result =
+        prime * result
+            + ((primaryFilters == null) ? 0 : primaryFilters.hashCode());
+    result =
+        prime * result
+            + ((relatedEntities == null) ? 0 : relatedEntities.hashCode());
+    result = prime * result + ((startTime == null) ? 0 : startTime.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    // generated by eclipse
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (getClass() != obj.getClass())
+      return false;
+    ATSEntity other = (ATSEntity) obj;
+    if (entityId == null) {
+      if (other.entityId != null)
+        return false;
+    } else if (!entityId.equals(other.entityId))
+      return false;
+    if (entityType == null) {
+      if (other.entityType != null)
+        return false;
+    } else if (!entityType.equals(other.entityType))
+      return false;
+    if (events == null) {
+      if (other.events != null)
+        return false;
+    } else if (!events.equals(other.events))
+      return false;
+    if (otherInfo == null) {
+      if (other.otherInfo != null)
+        return false;
+    } else if (!otherInfo.equals(other.otherInfo))
+      return false;
+    if (primaryFilters == null) {
+      if (other.primaryFilters != null)
+        return false;
+    } else if (!primaryFilters.equals(other.primaryFilters))
+      return false;
+    if (relatedEntities == null) {
+      if (other.relatedEntities != null)
+        return false;
+    } else if (!relatedEntities.equals(other.relatedEntities))
+      return false;
+    if (startTime == null) {
+      if (other.startTime != null)
+        return false;
+    } else if (!startTime.equals(other.startTime))
+      return false;
+    return true;
+  }
+
+  @Override
+  public int compareTo(ATSEntity other) {
+    int comparison = entityType.compareTo(other.entityType);
+    if (comparison == 0) {
+      long thisStartTime =
+          startTime == null ? Long.MIN_VALUE : startTime;
+      long otherStartTime =
+          other.startTime == null ? Long.MIN_VALUE : other.startTime;
+      if (thisStartTime > otherStartTime) {
+        return -1;
+      } else if (thisStartTime < otherStartTime) {
+        return 1;
+      } else {
+        return entityId.compareTo(other.entityId);
+      }
+    } else {
+      return comparison;
+    }
+  }
+
 }
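
A sketch of populating an entity with the narrowed String-based related-entity API; the id/type setters are assumed from the existing bean, and all identifiers are invented:

ATSEntity entity = new ATSEntity();
entity.setEntityType("APPLICATION");                    // setter assumed
entity.setEntityId("application_1390000000000_0001");  // setter assumed
entity.setStartTime(System.currentTimeMillis());
entity.addRelatedEntity("APPLICATION_ATTEMPT",
    "appattempt_1390000000000_0001_000001");
// compareTo() orders by entity type, then newest start time first, then id.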

+ 39 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSEvent.java

@@ -39,7 +39,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 @XmlAccessorType(XmlAccessType.NONE)
 @Public
 @Unstable
-public class ATSEvent {
+public class ATSEvent implements Comparable<ATSEvent> {
 
   private long timestamp;
   private String eventType;
@@ -131,4 +131,42 @@ public class ATSEvent {
     this.eventInfo = eventInfo;
   }
 
+  @Override
+  public int compareTo(ATSEvent other) {
+    if (timestamp > other.timestamp) {
+      return -1;
+    } else if (timestamp < other.timestamp) {
+      return 1;
+    } else {
+      return eventType.compareTo(other.eventType);
+    }
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o)
+      return true;
+    if (o == null || getClass() != o.getClass())
+      return false;
+
+    ATSEvent atsEvent = (ATSEvent) o;
+
+    if (timestamp != atsEvent.timestamp)
+      return false;
+    if (!eventType.equals(atsEvent.eventType))
+      return false;
+    if (eventInfo != null ? !eventInfo.equals(atsEvent.eventInfo) :
+        atsEvent.eventInfo != null)
+      return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    int result = (int) (timestamp ^ (timestamp >>> 32));
+    result = 31 * result + eventType.hashCode();
+    result = 31 * result + (eventInfo != null ? eventInfo.hashCode() : 0);
+    return result;
+  }
 }

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSEvents.java

@@ -160,7 +160,7 @@ public class ATSEvents {
      * @param event
      *          a single event
      */
-    public void addEntity(ATSEvent event) {
+    public void addEvent(ATSEvent event) {
       events.add(event);
     }
 

+ 163 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/apptimeline/ATSPutErrors.java

@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.api.records.apptimeline;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * A class that holds a list of put errors.  This is the response returned
+ * when a list of {@link ATSEntity} objects is added to the application
+ * timeline.  If there are errors in storing individual entity objects,
+ * they will be indicated in the list of errors.
+ */
+@XmlRootElement(name = "errors")
+@XmlAccessorType(XmlAccessType.NONE)
+@Public
+@Unstable
+public class ATSPutErrors {
+
+  private List<ATSPutError> errors = new ArrayList<ATSPutError>();
+
+  public ATSPutErrors() {
+
+  }
+
+  /**
+   * Get a list of {@link ATSPutError} instances
+   *
+   * @return a list of {@link ATSPutError} instances
+   */
+  @XmlElement(name = "errors")
+  public List<ATSPutError> getErrors() {
+    return errors;
+  }
+
+  /**
+   * Add a single {@link ATSPutError} instance into the existing list
+   *
+   * @param error
+   *          a single {@link ATSPutError} instance
+   */
+  public void addError(ATSPutError error) {
+    errors.add(error);
+  }
+
+  /**
+   * Add a list of {@link ATSPutError} instances into the existing list
+   *
+   * @param errors
+   *          a list of {@link ATSPutError} instances
+   */
+  public void addErrors(List<ATSPutError> errors) {
+    this.errors.addAll(errors);
+  }
+
+  /**
+   * Set the list to the given list of {@link ATSPutError} instances
+   *
+   * @param errors
+   *          a list of {@link ATSPutError} instances
+   */
+  public void setErrors(List<ATSPutError> errors) {
+    this.errors.clear();
+    this.errors.addAll(errors);
+  }
+
+  /**
+   * A class that holds the error code for one entity.
+   */
+  @XmlRootElement(name = "error")
+  @XmlAccessorType(XmlAccessType.NONE)
+  @Public
+  @Unstable
+  public static class ATSPutError {
+    private String entityId;
+    private String entityType;
+    private Integer errorCode;
+
+    /**
+     * Get the entity Id
+     *
+     * @return the entity Id
+     */
+    @XmlElement(name = "entity")
+    public String getEntityId() {
+      return entityId;
+    }
+
+    /**
+     * Set the entity Id
+     *
+     * @param entityId
+     *          the entity Id
+     */
+    public void setEntityId(String entityId) {
+      this.entityId = entityId;
+    }
+
+    /**
+     * Get the entity type
+     *
+     * @return the entity type
+     */
+    @XmlElement(name = "entitytype")
+    public String getEntityType() {
+      return entityType;
+    }
+
+    /**
+     * Set the entity type
+     *
+     * @param entityType
+     *          the entity type
+     */
+    public void setEntityType(String entityType) {
+      this.entityType = entityType;
+    }
+
+    /**
+     * Get the error code
+     *
+     * @return an error code
+     */
+    @XmlElement(name = "errorcode")
+    public Integer getErrorCode() {
+      return errorCode;
+    }
+
+    /**
+     * Set the error code to the given error code
+     *
+     * @param errorCode
+     *          an error code
+     */
+    public void setErrorCode(Integer errorCode) {
+      this.errorCode = errorCode;
+    }
+
+  }
+
+}
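
A sketch of building the response object from the store side; every name comes from the class above, the values are illustrative:

ATSPutErrors response = new ATSPutErrors();
ATSPutErrors.ATSPutError error = new ATSPutErrors.ATSPutError();
error.setEntityId("application_1390000000000_0001");
error.setEntityType("APPLICATION");
error.setErrorCode(1);               // meaning of the codes is left to the store
response.addError(error);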

+ 64 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/ConfigurationProvider.java

@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.conf;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+@Private
+@Unstable
+/**
+ * Base class to implement ConfigurationProvider.
+ * Real ConfigurationProvider implementations need to derive from it and
+ * implement load methods to actually load the configuration.
+ */
+public abstract class ConfigurationProvider {
+
+  public void init(Configuration conf) throws Exception {
+    initInternal(conf);
+  }
+
+  public void close() throws Exception {
+    closeInternal();
+  }
+
+  /**
+   * Get the configuration.
+   * @param name The configuration file name
+   * @return configuration
+   * @throws YarnException
+   * @throws IOException
+   */
+  public abstract Configuration getConfiguration(String name)
+      throws YarnException, IOException;
+
+  /**
+   * Derived classes initialize themselves using this method.
+   */
+  public abstract void initInternal(Configuration conf) throws Exception;
+
+  /**
+   * Derived classes close themselves using this method.
+   */
+  public abstract void closeInternal() throws Exception;
+}
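
A minimal provider sketch that loads named files from the classpath; the class name is hypothetical and this is not the implementation the patch series ships:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.exceptions.YarnException;

public class ClasspathConfigurationProvider extends ConfigurationProvider {
  @Override
  public Configuration getConfiguration(String name)
      throws YarnException, IOException {
    Configuration conf = new Configuration(false);
    conf.addResource(name);          // e.g. "hadoop-policy.xml" on the classpath
    return conf;
  }

  @Override
  public void initInternal(Configuration conf) throws Exception {
    // nothing to set up for classpath-based loading
  }

  @Override
  public void closeInternal() throws Exception {
    // nothing to release
  }
}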

+ 57 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/ConfigurationProviderFactory.java

@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.conf;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+
+@Private
+@Unstable
+/**
+ * Factory for {@link ConfigurationProvider} implementations.
+ */
+public class ConfigurationProviderFactory {
+  /**
+   * Creates an instance of {@link ConfigurationProvider} using given
+   * configuration.
+   * @param conf
+   * @return configurationProvider
+   */
+  @SuppressWarnings("unchecked")
+  public static ConfigurationProvider
+      getConfigurationProvider(Configuration conf) {
+    Class<? extends ConfigurationProvider> defaultProviderClass;
+    try {
+      defaultProviderClass = (Class<? extends ConfigurationProvider>)
+          Class.forName(
+              YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS);
+    } catch (Exception e) {
+      throw new YarnRuntimeException(
+          "Invalid default configuration provider class"
+              + YarnConfiguration.DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS, e);
+    }
+    ConfigurationProvider configurationProvider = ReflectionUtils.newInstance(
+        conf.getClass(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+            defaultProviderClass, ConfigurationProvider.class), conf);
+    return configurationProvider;
+  }
+}
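
A sketch of how a consumer obtains and uses a provider (init() and close() declare throws Exception, elided here); YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS selects the implementation:

Configuration conf = new Configuration();
ConfigurationProvider provider =
    ConfigurationProviderFactory.getConfigurationProvider(conf);
provider.init(conf);
Configuration policy =
    provider.getConfiguration(YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);
// ... refresh service-level ACLs from 'policy' ...
provider.close();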

+ 43 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/HAUtil.java

@@ -21,10 +21,13 @@ package org.apache.hadoop.yarn.conf;
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 
+import java.net.InetSocketAddress;
 import java.util.Collection;
 
 @InterfaceAudience.Private
@@ -108,8 +111,7 @@ public class HAUtil {
           String errmsg = iae.getMessage();
           if (confKey == null) {
             // Error at addSuffix
-            errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID,
-              getRMHAId(conf));
+            errmsg = getInvalidValueMessage(YarnConfiguration.RM_HA_ID, id);
           }
           throwBadConfigurationException(errmsg);
         }
@@ -122,10 +124,18 @@ public class HAUtil {
   }
 
   private static void verifyAndSetCurrentRMHAId(Configuration conf) {
-    String rmId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
+    String rmId = getRMHAId(conf);
     if (rmId == null) {
-      throwBadConfigurationException(
-        getNeedToSetValueMessage(YarnConfiguration.RM_HA_ID));
+      StringBuilder msg = new StringBuilder();
+      msg.append("Can not find valid RM_HA_ID. None of ");
+      for (String id : conf
+          .getTrimmedStringCollection(YarnConfiguration.RM_HA_IDS)) {
+        msg.append(addSuffix(YarnConfiguration.RM_ADDRESS, id) + " ");
+      }
+      msg.append(" are matching" +
+          " the local address OR " + YarnConfiguration.RM_HA_ID + " is not" +
+          " specified in HA Configuration");
+      throwBadConfigurationException(msg.toString());
     } else {
       Collection<String> ids = getRMHAIds(conf);
       if (!ids.contains(rmId)) {
@@ -179,7 +189,34 @@ public class HAUtil {
    * @return RM Id on success
    */
   public static String getRMHAId(Configuration conf) {
-    return conf.get(YarnConfiguration.RM_HA_ID);
+    int found = 0;
+    String currentRMId = conf.getTrimmed(YarnConfiguration.RM_HA_ID);
+    if(currentRMId == null) {
+      for(String rmId : getRMHAIds(conf)) {
+        String key = addSuffix(YarnConfiguration.RM_ADDRESS, rmId);
+        String addr = conf.get(key);
+        if (addr == null) {
+          continue;
+        }
+        InetSocketAddress s;
+        try {
+          s = NetUtils.createSocketAddr(addr);
+        } catch (Exception e) {
+          LOG.warn("Exception in creating socket address " + addr, e);
+          continue;
+        }
+        if (!s.isUnresolved() && NetUtils.isLocalAddress(s.getAddress())) {
+          currentRMId = rmId.trim();
+          found++;
+        }
+      }
+    }
+    if (found > 1) { // at most one address may match the local address
+      String msg = "The HA Configuration has multiple addresses that match "
+          + "the local node's address.";
+      throw new HadoopIllegalArgumentException(msg);
+    }
+    return currentRMId;
   }
 
   @VisibleForTesting

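For illustration only (not part of this patch): a sketch of the RM-id auto-detection added above, assuming it runs on a host where 127.0.0.1 counts as a local address and the second host name does not resolve to one.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.HAUtil;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class RmIdSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        conf.set(YarnConfiguration.RM_HA_IDS, "rm1,rm2");
        // yarn.resourcemanager.ha.id is deliberately left unset; the id is
        // inferred by matching yarn.resourcemanager.address.{id} against a
        // local address.
        conf.set(YarnConfiguration.RM_ADDRESS + ".rm1", "127.0.0.1:8032");
        conf.set(YarnConfiguration.RM_ADDRESS + ".rm2", "other-host.example.com:8032");
        System.out.println("Current RM id: " + HAUtil.getRMHAId(conf)); // rm1
      }
    }
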
+ 39 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java

@@ -25,6 +25,7 @@ import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -37,8 +38,26 @@ import org.apache.hadoop.yarn.api.ApplicationConstants;
 @Evolving
 public class YarnConfiguration extends Configuration {
 
+  @Private
+  public static final String CS_CONFIGURATION_FILE= "capacity-scheduler.xml";
+
+  @Private
+  public static final String HADOOP_POLICY_CONFIGURATION_FILE =
+      "hadoop-policy.xml";
+
+  @Private
+  public static final String YARN_SITE_XML_FILE = "yarn-site.xml";
+
+  @Private
+  public static final String CORE_SITE_CONFIGURATION_FILE = "core-site.xml";
+
+  @Evolving
+  public static final int APPLICATION_MAX_TAGS = 10;
+
+  @Evolving
+  public static final int APPLICATION_MAX_TAG_LENGTH = 100;
+
   private static final String YARN_DEFAULT_XML_FILE = "yarn-default.xml";
-  private static final String YARN_SITE_XML_FILE = "yarn-site.xml";
 
   static {
     Configuration.addDefaultResource(YARN_DEFAULT_XML_FILE);
@@ -329,6 +348,16 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_HA_IDS = RM_HA_PREFIX + "rm-ids";
   public static final String RM_HA_ID = RM_HA_PREFIX + "id";
 
+  /** Store the related configuration files in the file system. */
+  public static final String FS_BASED_RM_CONF_STORE = RM_PREFIX
+      + "configuration.file-system-based-store";
+  public static final String DEFAULT_FS_BASED_RM_CONF_STORE = "/yarn/conf";
+
+  public static final String RM_CONFIGURATION_PROVIDER_CLASS = RM_PREFIX
+      + "configuration.provider-class";
+  public static final String DEFAULT_RM_CONFIGURATION_PROVIDER_CLASS =
+      "org.apache.hadoop.yarn.LocalConfigurationProvider";
+
   @Private
   public static final List<String> RM_SERVICES_ADDRESS_CONF_KEYS =
       Collections.unmodifiableList(Arrays.asList(
@@ -999,6 +1028,15 @@ public class YarnConfiguration extends Configuration {
   public static final String AHS_WEBAPP_SPNEGO_KEYTAB_FILE_KEY =
       AHS_PREFIX + "webapp.spnego-keytab-file";
 
+  ////////////////////////////////
+  // ATS Configs
+  ////////////////////////////////
+
+  public static final String ATS_PREFIX = YARN_PREFIX + "ats.";
+
+  /** ATS store class */
+  public static final String ATS_STORE = ATS_PREFIX + "store.class";
+
   ////////////////////////////////
   // Other Configs
   ////////////////////////////////

+ 2 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto

@@ -190,6 +190,7 @@ message ApplicationReportProto {
   optional float progress = 17;
   optional string applicationType = 18;
   optional hadoop.common.TokenProto am_rm_token = 19;
+  repeated string applicationTags = 20;
 }
 
 message ApplicationAttemptReportProto {
@@ -287,6 +288,7 @@ message ApplicationSubmissionContextProto {
   optional ResourceProto resource = 9;
   optional string applicationType = 10 [default = "YARN"];
   optional bool keep_containers_across_application_attempts = 11 [default = false];
+  repeated string applicationTags = 12;
 }
 
 enum ApplicationAccessTypeProto {

+ 8 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto

@@ -136,6 +136,12 @@ message MoveApplicationAcrossQueuesRequestProto {
 message MoveApplicationAcrossQueuesResponseProto {
 }
 
+enum ApplicationsRequestScopeProto {
+  ALL = 0;
+  VIEWABLE = 1;
+  OWN = 2;
+}
+
 message GetApplicationsRequestProto {
   repeated string application_types = 1;
   repeated YarnApplicationStateProto application_states = 2;
@@ -146,6 +152,8 @@ message GetApplicationsRequestProto {
   optional int64 start_end = 7;
   optional int64 finish_begin = 8;
   optional int64 finish_end = 9;
+  repeated string applicationTags = 10;
+  optional ApplicationsRequestScopeProto scope = 11 [default = ALL];
 }
 
 message GetApplicationsResponseProto {

+ 8 - 7
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java

@@ -232,7 +232,6 @@ public class ApplicationMaster {
   private static final String shellArgsPath = "shellArgs";
 
   private volatile boolean done;
-  private volatile boolean success;
 
   private ByteBuffer allTokens;
 
@@ -254,8 +253,8 @@ public class ApplicationMaster {
       if (!doRun) {
         System.exit(0);
       }
-      result = appMaster.run();
-      appMaster.finish();
+      appMaster.run();
+      result = appMaster.finish();
     } catch (Throwable t) {
       LOG.fatal("Error running ApplicationMaster", t);
       System.exit(1);
@@ -480,7 +479,7 @@ public class ApplicationMaster {
    * @throws IOException
    */
   @SuppressWarnings({ "unchecked" })
-  public boolean run() throws YarnException, IOException {
+  public void run() throws YarnException, IOException {
     LOG.info("Starting ApplicationMaster");
 
     Credentials credentials =
@@ -561,7 +560,6 @@ public class ApplicationMaster {
       amRMClient.addContainerRequest(containerAsk);
     }
     numRequestedContainers.set(numTotalContainersToRequest);
-    return success;
   }
 
   @VisibleForTesting
@@ -569,7 +567,8 @@ public class ApplicationMaster {
     return new NMCallbackHandler(this);
   }
 
-  protected void finish() {
+  @VisibleForTesting
+  protected boolean finish() {
     // wait for completion.
     while (!done
         && (numCompletedContainers.get() != numTotalContainers)) {
@@ -600,7 +599,7 @@ public class ApplicationMaster {
 
     FinalApplicationStatus appStatus;
     String appMessage = null;
-    success = true;
+    boolean success = true;
     if (numFailedContainers.get() == 0 && 
         numCompletedContainers.get() == numTotalContainers) {
       appStatus = FinalApplicationStatus.SUCCEEDED;
@@ -621,6 +620,8 @@ public class ApplicationMaster {
     }
     
     amRMClient.stop();
+
+    return success;
   }
   
   private class RMCallbackHandler implements AMRMClientAsync.CallbackHandler {

+ 5 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/ContainerLaunchFailAppMaster.java

@@ -18,13 +18,13 @@
 
 package org.apache.hadoop.yarn.applications.distributedshell;
 
+import java.nio.ByteBuffer;
+import java.util.Map;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 
-import java.nio.ByteBuffer;
-import java.util.Map;
-
 public class ContainerLaunchFailAppMaster extends ApplicationMaster {
 
   private static final Log LOG =
@@ -66,8 +66,8 @@ public class ContainerLaunchFailAppMaster extends ApplicationMaster {
       if (!doRun) {
         System.exit(0);
       }
-      result = appMaster.run();
-      appMaster.finish();
+      appMaster.run();
+      result = appMaster.finish();
     } catch (Throwable t) {
       LOG.fatal("Error running ApplicationMaster", t);
       System.exit(1);

+ 7 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDSFailedAppMaster.java

@@ -29,8 +29,8 @@ public class TestDSFailedAppMaster extends ApplicationMaster {
   private static final Log LOG = LogFactory.getLog(TestDSFailedAppMaster.class);
 
   @Override
-  public boolean run() throws YarnException, IOException {
-    boolean res = super.run();
+  public void run() throws YarnException, IOException {
+    super.run();
 
     // for the 2nd attempt.
     if (appAttemptID.getAttemptId() == 2) {
@@ -39,11 +39,12 @@ public class TestDSFailedAppMaster extends ApplicationMaster {
       // numRequestedContainers should be set to 0.
       if (numAllocatedContainers.get() != 1
           || numRequestedContainers.get() != 0) {
-        LOG.info("Application Master failed. exiting");
+        LOG.info("NumAllocatedContainers is " + numAllocatedContainers.get()
+            + " and NumRequestedContainers is " + numAllocatedContainers.get()
+            + ".Application Master failed. exiting");
         System.exit(200);
       }
     }
-    return res;
   }
 
   public static void main(String[] args) {
@@ -54,7 +55,7 @@ public class TestDSFailedAppMaster extends ApplicationMaster {
       if (!doRun) {
         System.exit(0);
       }
-      result = appMaster.run();
+      appMaster.run();
       if (appMaster.appAttemptID.getAttemptId() == 1) {
         try {
           // sleep some time, wait for the AM to launch a container.
@@ -63,7 +64,7 @@ public class TestDSFailedAppMaster extends ApplicationMaster {
         // fail the first am.
         System.exit(100);
       }
-      appMaster.finish();
+      result = appMaster.finish();
     } catch (Throwable t) {
       System.exit(1);
     }

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java

@@ -197,7 +197,7 @@ public class ApplicationCLI extends YarnCLI {
         listApplications(appTypes, appStates);
       }
     } else if (cliParser.hasOption(KILL_CMD)) {
-      if (args.length != 2) {
+      if (args.length != 3) {
         printUsage(opts);
         return exitCode;
       }

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java

@@ -681,7 +681,7 @@ public class TestYarnCLI {
     sysOutStream.reset();
     ApplicationId applicationId = ApplicationId.newInstance(1234, 5);
     result =
-        cli.run(new String[] { "-kill", applicationId.toString(), "args" });
+        cli.run(new String[] {"application", "-kill", applicationId.toString(), "args" });
     verify(spyCli).printUsage(any(Options.class));
     Assert.assertEquals(createApplicationCLIHelpMessage(),
         sysOutStream.toString());
@@ -717,7 +717,7 @@ public class TestYarnCLI {
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport2);
-    int result = cli.run(new String[] { "-kill", applicationId.toString() });
+    int result = cli.run(new String[] { "application","-kill", applicationId.toString() });
     assertEquals(0, result);
     verify(client, times(0)).killApplication(any(ApplicationId.class));
     verify(sysOut).println(
@@ -730,7 +730,7 @@ public class TestYarnCLI {
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport);
-    result = cli.run(new String[] { "-kill", applicationId.toString() });
+    result = cli.run(new String[] { "application","-kill", applicationId.toString() });
     assertEquals(0, result);
     verify(client).killApplication(any(ApplicationId.class));
     verify(sysOut).println("Killing application application_1234_0005");
@@ -740,7 +740,7 @@ public class TestYarnCLI {
         .getApplicationReport(applicationId);
     cli = createAndGetAppCLI();
     try {
-      cli.run(new String[] { "-kill", applicationId.toString() });
+      cli.run(new String[] { "application","-kill", applicationId.toString() });
       Assert.fail();
     } catch (Exception ex) {
       Assert.assertTrue(ex instanceof ApplicationNotFoundException);

+ 72 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/FileSystemBasedConfigurationProvider.java

@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.yarn.conf.ConfigurationProvider;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+@Private
+@Unstable
+public class FileSystemBasedConfigurationProvider
+    extends ConfigurationProvider {
+
+  private static final Log LOG = LogFactory
+      .getLog(FileSystemBasedConfigurationProvider.class);
+  private FileSystem fs;
+  private Path configDir;
+
+  @Override
+  public synchronized Configuration getConfiguration(String name)
+      throws IOException, YarnException {
+    Path configPath = new Path(this.configDir, name);
+    if (!fs.exists(configPath)) {
+      throw new YarnException("Can not find Configuration: " + name + " in "
+          + configDir);
+    }
+    Configuration conf = new Configuration(false);
+    conf.addResource(fs.open(configPath));
+    return conf;
+  }
+
+  @Override
+  public synchronized void initInternal(Configuration conf) throws Exception {
+    configDir =
+        new Path(conf.get(YarnConfiguration.FS_BASED_RM_CONF_STORE,
+            YarnConfiguration.DEFAULT_FS_BASED_RM_CONF_STORE));
+    fs = configDir.getFileSystem(conf);
+    if (!fs.exists(configDir)) {
+      fs.mkdirs(configDir);
+    }
+  }
+
+  @Override
+  public synchronized void closeInternal() throws Exception {
+    fs.close();
+  }
+}

+ 48 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/LocalConfigurationProvider.java

@@ -0,0 +1,48 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.conf.ConfigurationProvider;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+
+@Private
+@Unstable
+public class LocalConfigurationProvider extends ConfigurationProvider {
+
+  @Override
+  public Configuration getConfiguration(String name)
+      throws IOException, YarnException {
+    return new Configuration();
+  }
+
+  @Override
+  public void initInternal(Configuration conf) throws Exception {
+    // Do nothing
+  }
+
+  @Override
+  public void closeInternal() throws Exception {
+    // Do nothing
+  }
+}

+ 61 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java

@@ -27,6 +27,7 @@ import java.util.Set;
 import org.apache.commons.lang.math.LongRange;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
 import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
@@ -49,6 +50,8 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
   Set<String> queues = null;
   long limit = Long.MAX_VALUE;
   LongRange start = null, finish = null;
+  private Set<String> applicationTags;
+  private ApplicationsRequestScope scope;
 
   public GetApplicationsRequestPBImpl() {
     builder = GetApplicationsRequestProto.newBuilder();
@@ -112,6 +115,12 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
           };
       builder.addAllApplicationStates(iterable);
     }
+    if (this.applicationTags != null && !this.applicationTags.isEmpty()) {
+      builder.addAllApplicationTags(this.applicationTags);
+    }
+    if (this.scope != null) {
+      builder.setScope(ProtoUtils.convertToProtoFormat(scope));
+    }
   }
 
   private void addLocalApplicationTypesToProto() {
@@ -187,12 +196,64 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
     this.applicationTypes = applicationTypes;
   }
 
+  private void initApplicationTags() {
+    if (this.applicationTags != null) {
+      return;
+    }
+    GetApplicationsRequestProtoOrBuilder p = viaProto ? proto : builder;
+    this.applicationTags = new HashSet<String>();
+    this.applicationTags.addAll(p.getApplicationTagsList());
+  }
+
+  @Override
+  public Set<String> getApplicationTags() {
+    initApplicationTags();
+    return this.applicationTags;
+  }
+
+  @Override
+  public void setApplicationTags(Set<String> tags) {
+    maybeInitBuilder();
+    if (tags == null || tags.isEmpty()) {
+      builder.clearApplicationTags();
+      this.applicationTags = null;
+      return;
+    }
+    // Convert applicationTags to lower case and add
+    this.applicationTags = new HashSet<String>();
+    for (String tag : tags) {
+      this.applicationTags.add(tag.toLowerCase());
+    }
+  }
+
   @Override
   public EnumSet<YarnApplicationState> getApplicationStates() {
     initApplicationStates();
     return this.applicationStates;
   }
 
+  private void initScope() {
+    if (this.scope != null) {
+      return;
+    }
+    GetApplicationsRequestProtoOrBuilder p = viaProto ? proto : builder;
+    this.scope = ProtoUtils.convertFromProtoFormat(p.getScope());
+  }
+
+  @Override
+  public ApplicationsRequestScope getScope() {
+    initScope();
+    return this.scope;
+  }
+
+  public void setScope(ApplicationsRequestScope scope) {
+    maybeInitBuilder();
+    if (scope == null) {
+      builder.clearScope();
+    }
+    this.scope = scope;
+  }
+
   @Override
   public void setApplicationStates(EnumSet<YarnApplicationState> applicationStates) {
     maybeInitBuilder();
@@ -223,7 +284,6 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest {
     return this.users;
   }
 
-  @Override
   public void setUsers(Set<String> users) {
     maybeInitBuilder();
     if (users == null) {

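For illustration only (not part of this patch): a sketch of the new tag and scope filters, using the PB implementation above directly; ApplicationsRequestScope.OWN is assumed to mirror the proto enum added in yarn_service_protos.proto.

    import java.util.Arrays;
    import java.util.HashSet;

    import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
    import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.GetApplicationsRequestPBImpl;

    public class TagFilterSketch {
      public static void main(String[] args) {
        GetApplicationsRequestPBImpl request = new GetApplicationsRequestPBImpl();
        // Tags are lower-cased on the way in, so matching is case-insensitive.
        request.setApplicationTags(new HashSet<String>(Arrays.asList("Nightly", "ETL")));
        request.setScope(ApplicationsRequestScope.OWN);
        System.out.println(request.getApplicationTags()); // e.g. [etl, nightly]
        System.out.println(request.getScope());           // OWN
      }
    }
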
+ 31 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java

@@ -38,6 +38,9 @@ import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
 
 import com.google.protobuf.TextFormat;
 
+import java.util.HashSet;
+import java.util.Set;
+
 @Private
 @Unstable
 public class ApplicationReportPBImpl extends ApplicationReport {
@@ -49,6 +52,7 @@ public class ApplicationReportPBImpl extends ApplicationReport {
   private ApplicationAttemptId currentApplicationAttemptId;
   private Token clientToAMToken = null;
   private Token amRmToken = null;
+  private Set<String> applicationTags = null;
 
   public ApplicationReportPBImpl() {
     builder = ApplicationReportProto.newBuilder();
@@ -245,6 +249,21 @@ public class ApplicationReportPBImpl extends ApplicationReport {
     return amRmToken;
   }
 
+  private void initApplicationTags() {
+    if (this.applicationTags != null) {
+      return;
+    }
+    ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+    this.applicationTags = new HashSet<String>();
+    this.applicationTags.addAll(p.getApplicationTagsList());
+  }
+
+  @Override
+  public Set<String> getApplicationTags() {
+    initApplicationTags();
+    return this.applicationTags;
+  }
+
   @Override
   public void setApplicationId(ApplicationId applicationId) {
     maybeInitBuilder();
@@ -355,6 +374,15 @@ public class ApplicationReportPBImpl extends ApplicationReport {
     builder.setApplicationType((applicationType));
   }
 
+  @Override
+  public void setApplicationTags(Set<String> tags) {
+    maybeInitBuilder();
+    if (tags == null || tags.isEmpty()) {
+      builder.clearApplicationTags();
+    }
+    this.applicationTags = tags;
+  }
+
   @Override
   public void setDiagnostics(String diagnostics) {
     maybeInitBuilder();
@@ -450,6 +478,9 @@ public class ApplicationReportPBImpl extends ApplicationReport {
       builder.getAmRmToken())) {
       builder.setAmRmToken(convertToProtoFormat(this.amRmToken));
     }
+    if (this.applicationTags != null && !this.applicationTags.isEmpty()) {
+      builder.addAllApplicationTags(this.applicationTags);
+    }
   }
 
   private void mergeLocalToProto() {

+ 59 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.api.records.impl.pb;
 
+import com.google.common.base.CharMatcher;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -25,6 +26,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProtoOrBuilder;
@@ -34,6 +36,9 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
 
 import com.google.protobuf.TextFormat;
 
+import java.util.HashSet;
+import java.util.Set;
+
 @Private
 @Unstable
 public class ApplicationSubmissionContextPBImpl 
@@ -47,6 +52,7 @@ extends ApplicationSubmissionContext {
   private Priority priority = null;
   private ContainerLaunchContext amContainer = null;
   private Resource resource = null;
+  private Set<String> applicationTags = null;
 
   public ApplicationSubmissionContextPBImpl() {
     builder = ApplicationSubmissionContextProto.newBuilder();
@@ -100,6 +106,9 @@ extends ApplicationSubmissionContext {
             builder.getResource())) {
       builder.setResource(convertToProtoFormat(this.resource));
     }
+    if (this.applicationTags != null && !this.applicationTags.isEmpty()) {
+      builder.addAllApplicationTags(this.applicationTags);
+    }
   }
 
   private void mergeLocalToProto() {
@@ -196,7 +205,22 @@ extends ApplicationSubmissionContext {
     }
     return (p.getApplicationType());
   }
-  
+
+  private void initApplicationTags() {
+    if (this.applicationTags != null) {
+      return;
+    }
+    ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;
+    this.applicationTags = new HashSet<String>();
+    this.applicationTags.addAll(p.getApplicationTagsList());
+  }
+
+  @Override
+  public Set<String> getApplicationTags() {
+    initApplicationTags();
+    return this.applicationTags;
+  }
+
   @Override
   public void setQueue(String queue) {
     maybeInitBuilder();
@@ -217,6 +241,40 @@ extends ApplicationSubmissionContext {
     builder.setApplicationType((applicationType));
   }
 
+  private void checkTags(Set<String> tags) {
+    if (tags.size() > YarnConfiguration.APPLICATION_MAX_TAGS) {
+      throw new IllegalArgumentException("Too many applicationTags, a maximum of only "
+          + YarnConfiguration.APPLICATION_MAX_TAGS + " are allowed!");
+    }
+    for (String tag : tags) {
+      if (tag.length() > YarnConfiguration.APPLICATION_MAX_TAG_LENGTH) {
+        throw new IllegalArgumentException("Tag " + tag + " is too long, " +
+            "maximum allowed length of a tag is " +
+            YarnConfiguration.APPLICATION_MAX_TAG_LENGTH);
+      }
+      if (!CharMatcher.ASCII.matchesAllOf(tag)) {
+        throw new IllegalArgumentException("A tag can only have ASCII " +
+            "characters! Invalid tag - " + tag);
+      }
+    }
+  }
+
+  @Override
+  public void setApplicationTags(Set<String> tags) {
+    maybeInitBuilder();
+    if (tags == null || tags.isEmpty()) {
+      builder.clearApplicationTags();
+      this.applicationTags = null;
+      return;
+    }
+    checkTags(tags);
+    // Convert applicationTags to lower case and add
+    this.applicationTags = new HashSet<String>();
+    for (String tag : tags) {
+      this.applicationTags.add(tag.toLowerCase());
+    }
+  }
+
   @Override
   public ContainerLaunchContext getAMContainerSpec() {
     ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder;

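For illustration only (not part of this patch): a sketch of the tag validation added above. At most APPLICATION_MAX_TAGS tags of up to APPLICATION_MAX_TAG_LENGTH ASCII characters are accepted and stored lower-cased, while a non-ASCII tag is rejected.

    import java.util.Arrays;
    import java.util.HashSet;

    import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl;

    public class SubmissionTagsSketch {
      public static void main(String[] args) {
        ApplicationSubmissionContextPBImpl context =
            new ApplicationSubmissionContextPBImpl();
        // Accepted: stored lower-cased as {analytics, ad-hoc}.
        context.setApplicationTags(
            new HashSet<String>(Arrays.asList("Analytics", "ad-hoc")));
        System.out.println(context.getApplicationTags());
        try {
          // Rejected: a tag may only contain ASCII characters.
          context.setApplicationTags(
              new HashSet<String>(Arrays.asList("b\u00e9ta")));
        } catch (IllegalArgumentException expected) {
          System.out.println(expected.getMessage());
        }
      }
    }
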
+ 14 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoUtils.java

@@ -22,6 +22,7 @@ import java.nio.ByteBuffer;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.protocolrecords.ApplicationsRequestScope;
 import org.apache.hadoop.yarn.api.records.AMCommand;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
@@ -50,6 +51,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationAttemptStateProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto;
 
 import com.google.protobuf.ByteString;
+import org.apache.hadoop.yarn.proto.YarnServiceProtos;
 
 @Private
 @Unstable
@@ -113,6 +115,18 @@ public class ProtoUtils {
         YARN_APPLICATION_ATTEMPT_STATE_PREFIX, ""));
   }
 
+  /*
+   * ApplicationsRequestScope
+   */
+  public static YarnServiceProtos.ApplicationsRequestScopeProto
+      convertToProtoFormat(ApplicationsRequestScope e) {
+    return YarnServiceProtos.ApplicationsRequestScopeProto.valueOf(e.name());
+  }
+  public static ApplicationsRequestScope convertFromProtoFormat(
+      YarnServiceProtos.ApplicationsRequestScopeProto e) {
+    return ApplicationsRequestScope.valueOf(e.name());
+  }
+
   /*
    * ApplicationResourceUsageReport
    */

+ 58 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/YarnJacksonJaxbJsonProvider.java

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.webapp;
+
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.ext.Provider;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.codehaus.jackson.jaxrs.JacksonJaxbJsonProvider;
+import org.codehaus.jackson.map.AnnotationIntrospector;
+import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.annotate.JsonSerialize.Inclusion;
+import org.codehaus.jackson.xc.JaxbAnnotationIntrospector;
+
+import com.google.inject.Singleton;
+
+/**
+ * YARN's implementation of JAX-RS abstractions based on
+ * {@link JacksonJaxbJsonProvider}, needed to deserialize JSON content into,
+ * or serialize it from, POJO objects.
+ */
+@Singleton
+@Provider
+@Unstable
+@Private
+public class YarnJacksonJaxbJsonProvider extends JacksonJaxbJsonProvider {
+
+  public YarnJacksonJaxbJsonProvider() {
+    super();
+  }
+
+  @Override
+  public ObjectMapper locateMapper(Class<?> type, MediaType mediaType) {
+    ObjectMapper mapper = super.locateMapper(type, mediaType);
+    AnnotationIntrospector introspector = new JaxbAnnotationIntrospector();
+    mapper.setAnnotationIntrospector(introspector);
+    mapper.getSerializationConfig()
+        .setSerializationInclusion(Inclusion.NON_NULL);
+    return mapper;
+  }
+}

+ 28 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml

@@ -130,7 +130,7 @@
   <property>
     <description>Are acls enabled.</description>
     <name>yarn.acl.enable</name>
-    <value>true</value>
+    <value>false</value>
   </property>
 
   <property>
@@ -395,7 +395,9 @@
       the Active mode when prompted to.
       (2) The nodes in the RM ensemble are listed in
       yarn.resourcemanager.ha.rm-ids
-      (3) The id of each RM comes from yarn.resourcemanager.ha.id
+      (3) The id of each RM either comes from yarn.resourcemanager.ha.id,
+      if it is explicitly specified, or is figured out by matching
+      yarn.resourcemanager.address.{id} with the local address
       (4) The actual physical addresses come from the configs of the pattern
       - {rpc-config}.{id}</description>
     <name>yarn.resourcemanager.ha.enabled</name>
@@ -442,7 +444,10 @@
 
   <property>
     <description>The id (string) of the current RM. When HA is enabled, this
-      is a required config. See description of yarn.resourcemanager.ha.enabled
+      is an optional config. The id of the current RM can either be set by
+      explicitly specifying yarn.resourcemanager.ha.id, or be figured out by
+      matching yarn.resourcemanager.address.{id} with the local address.
+      See the description of yarn.resourcemanager.ha.enabled
       for full details on how this is used.</description>
     <name>yarn.resourcemanager.ha.id</name>
     <!--value>rm1</value-->
@@ -588,6 +593,18 @@
     <value>org.apache.hadoop.yarn.server.applicationhistoryservice.NullApplicationHistoryStore</value>
   </property>
 
+  <property>
+    <description>The class to use as the configuration provider.
+    If org.apache.hadoop.yarn.LocalConfigurationProvider is used,
+    the local configuration will be loaded.
+    If org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider is used,
+    the configuration to be loaded must first be uploaded to the remote file system.
+    </description>
+    <name>yarn.resourcemanager.configuration.provider-class</name>
+    <value>org.apache.hadoop.yarn.LocalConfigurationProvider</value>
+    <!-- <value>org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider</value> -->
+  </property>
+
   <!-- Node Manager Configs -->
   <property>
     <description>The hostname of the NM.</description>
@@ -1120,6 +1137,14 @@
 	<value>org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore</value>
   </property>
 
+  <!-- Application Timeline Service's Configuration -->
+
+  <property>
+    <description>Store class name for application timeline store</description>
+    <name>yarn.ats.store.class</name>
+    <value>org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.MemoryApplicationTimelineStore</value>
+  </property>
+
   <!-- Other configuration -->
   <property>
     <description>The interval that the yarn client library uses to poll the

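For illustration only (not part of this patch): a sketch of switching the RM to the file-system based provider via the properties documented above; /yarn/conf matches the documented default of yarn.resourcemanager.configuration.file-system-based-store.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class FsProviderConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // Replace the default LocalConfigurationProvider with the
        // file-system based provider and point it at the upload directory.
        conf.set(YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
            "org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider");
        conf.set(YarnConfiguration.FS_BASED_RM_CONF_STORE, "/yarn/conf");
        // Files such as hadoop-policy.xml are then read from /yarn/conf on the
        // default file system rather than from the local classpath.
      }
    }
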
+ 38 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/records/apptimeline/TestApplicationTimelineRecords.java

@@ -18,10 +18,13 @@
 
 package org.apache.hadoop.yarn.api.records.apptimeline;
 
+import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.List;
 
 import junit.framework.Assert;
 
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors.ATSPutError;
 import org.junit.Test;
 
 public class TestApplicationTimelineRecords {
@@ -42,10 +45,8 @@ public class TestApplicationTimelineRecords {
         event.addEventInfo("key2", "val2");
         entity.addEvent(event);
       }
-      entity.addRelatedEntity(
-          "test ref type 1", Arrays.asList((Object) "test ref id 1"));
-      entity.addRelatedEntity(
-          "test ref type 2", Arrays.asList((Object) "test ref id 2"));
+      entity.addRelatedEntity("test ref type 1", "test ref id 1");
+      entity.addRelatedEntity("test ref type 2", "test ref id 2");
       entity.addPrimaryFilter("pkey1", "pval1");
       entity.addPrimaryFilter("pkey2", "pval2");
       entity.addOtherInfo("okey1", "oval1");
@@ -83,7 +84,7 @@ public class TestApplicationTimelineRecords {
         event.setEventType("event type " + i);
         event.addEventInfo("key1", "val1");
         event.addEventInfo("key2", "val2");
-        partEvents.addEntity(event);
+        partEvents.addEvent(event);
       }
       events.addEvent(partEvents);
     }
@@ -110,4 +111,36 @@ public class TestApplicationTimelineRecords {
     Assert.assertEquals(2, event22.getEventInfo().size());
   }
 
+  @Test
+  public void testATSPutErrors() {
+    ATSPutErrors atsPutErrors = new ATSPutErrors();
+    ATSPutError error1 = new ATSPutError();
+    error1.setEntityId("entity id 1");
+    error1.setEntityId("entity type 1");
+    error1.setErrorCode(1);
+    atsPutErrors.addError(error1);
+    List<ATSPutError> errors = new ArrayList<ATSPutError>();
+    errors.add(error1);
+    ATSPutError error2 = new ATSPutError();
+    error2.setEntityId("entity id 2");
+    error2.setEntityId("entity type 2");
+    error2.setErrorCode(2);
+    errors.add(error2);
+    atsPutErrors.addErrors(errors);
+
+    Assert.assertEquals(3, atsPutErrors.getErrors().size());
+    ATSPutError e = atsPutErrors.getErrors().get(0);
+    Assert.assertEquals(error1.getEntityId(), e.getEntityId());
+    Assert.assertEquals(error1.getEntityType(), e.getEntityType());
+    Assert.assertEquals(error1.getErrorCode(), e.getErrorCode());
+    e = atsPutErrors.getErrors().get(1);
+    Assert.assertEquals(error1.getEntityId(), e.getEntityId());
+    Assert.assertEquals(error1.getEntityType(), e.getEntityType());
+    Assert.assertEquals(error1.getErrorCode(), e.getErrorCode());
+    e = atsPutErrors.getErrors().get(2);
+    Assert.assertEquals(error2.getEntityId(), e.getEntityId());
+    Assert.assertEquals(error2.getEntityType(), e.getEntityType());
+    Assert.assertEquals(error2.getErrorCode(), e.getErrorCode());
+  }
+
 }

+ 17 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java

@@ -27,11 +27,14 @@ import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.service.Service;
 import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.MemoryApplicationTimelineStore;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSWebApp;
 import org.apache.hadoop.yarn.webapp.WebApp;
 import org.apache.hadoop.yarn.webapp.WebApps;
@@ -51,6 +54,7 @@ public class ApplicationHistoryServer extends CompositeService {
 
   ApplicationHistoryClientService ahsClientService;
   ApplicationHistoryManager historyManager;
+  ApplicationTimelineStore timelineStore;
   private WebApp webApp;
 
   public ApplicationHistoryServer() {
@@ -63,6 +67,8 @@ public class ApplicationHistoryServer extends CompositeService {
     ahsClientService = createApplicationHistoryClientService(historyManager);
     addService(ahsClientService);
     addService((Service) historyManager);
+    timelineStore = createApplicationTimelineStore(conf);
+    addIfService(timelineStore);
     super.serviceInit(conf);
   }
 
@@ -135,6 +141,15 @@ public class ApplicationHistoryServer extends CompositeService {
     return new ApplicationHistoryManagerImpl();
   }
 
+  protected ApplicationTimelineStore createApplicationTimelineStore(
+      Configuration conf) {
+    // TODO: need to replace the MemoryApplicationTimelineStore.class with the
+    // LevelDB implementation
+    return ReflectionUtils.newInstance(conf.getClass(
+        YarnConfiguration.ATS_STORE, MemoryApplicationTimelineStore.class,
+        ApplicationTimelineStore.class), conf);
+  }
+
   protected void startWebApp() {
     String bindAddress = WebAppUtils.getAHSWebAppURLWithoutScheme(getConfig());
     LOG.info("Instantiating AHSWebApp at " + bindAddress);
@@ -148,7 +163,8 @@ public class ApplicationHistoryServer extends CompositeService {
               YarnConfiguration.AHS_WEBAPP_SPNEGO_USER_NAME_KEY)
             .withHttpSpnegoKeytabKey(
               YarnConfiguration.AHS_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
-            .at(bindAddress).start(new AHSWebApp(historyManager));
+            .at(bindAddress)
+            .start(new AHSWebApp(historyManager, timelineStore));
     } catch (Exception e) {
       String msg = "AHSWebApp failed to start.";
       LOG.error(msg, e);

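For illustration only (not part of this patch): a sketch of selecting the timeline store that createApplicationTimelineStore above instantiates; Configuration.setClass is the standard Hadoop API for class-valued properties.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.MemoryApplicationTimelineStore;

    public class AtsStoreConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();
        // yarn.ats.store.class decides which ApplicationTimelineStore the
        // ApplicationHistoryServer creates; the in-memory store is the
        // interim default until a persistent (e.g. LevelDB) store lands.
        conf.setClass(YarnConfiguration.ATS_STORE,
            MemoryApplicationTimelineStore.class, ApplicationTimelineStore.class);
      }
    }
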
+ 125 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineReader.java

@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
+
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.Set;
+import java.util.SortedSet;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents;
+
+/**
+ * This interface is for retrieving application timeline information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface ApplicationTimelineReader {
+
+  /**
+   * Possible fields to retrieve for {@link #getEntities} and {@link
+   * #getEntity}.
+   */
+  enum Field {
+    EVENTS,
+    RELATED_ENTITIES,
+    PRIMARY_FILTERS,
+    OTHER_INFO,
+    LAST_EVENT_ONLY
+  }
+
+  /**
+   * Default limit for {@link #getEntities} and {@link #getEntityTimelines}.
+   */
+  final long DEFAULT_LIMIT = 100;
+
+  /**
+   * This method retrieves a list of entity information, {@link ATSEntity},
+   * sorted by the starting timestamp for the entity, descending.
+   *
+   * @param entityType The type of entities to return (required).
+   * @param limit A limit on the number of entities to return. If null,
+   *              defaults to {@link #DEFAULT_LIMIT}.
+   * @param windowStart The earliest start timestamp to retrieve (exclusive).
+   *                    If null, defaults to retrieving all entities until the
+   *                    limit is reached.
+   * @param windowEnd The latest start timestamp to retrieve (inclusive).
+   *                  If null, defaults to {@link Long#MAX_VALUE}
+   * @param primaryFilter Retrieves only entities that have the specified
+   *                      primary filter. If null, retrieves all entities.
+   *                      This is an indexed retrieval, and no entities that
+   *                      do not match the filter are scanned.
+   * @param secondaryFilters Retrieves only entities that have exact matches
+   *                         for all the specified filters in their primary
+   *                         filters or other info. This is not an indexed
+   *                         retrieval, so all entities are scanned but only
+   *                         those matching the filters are returned.
+   * @param fieldsToRetrieve Specifies which fields of the entity object to
+   *                         retrieve (see {@link Field}). If the set of fields
+   *                         contains {@link Field#LAST_EVENT_ONLY} and not
+   *                         {@link Field#EVENTS}, the most recent event for
+   *                         each entity is retrieved.
+   * @return An {@link ATSEntities} object.
+   */
+  ATSEntities getEntities(String entityType,
+      Long limit, Long windowStart, Long windowEnd,
+      NameValuePair primaryFilter, Collection<NameValuePair> secondaryFilters,
+      EnumSet<Field> fieldsToRetrieve);
+
+  /**
+   * This method retrieves the entity information for a given entity.
+   *
+   * @param entity The entity whose information will be retrieved.
+   * @param entityType The type of the entity.
+   * @param fieldsToRetrieve Specifies which fields of the entity object to
+   *                         retrieve (see {@link Field}). If the set of
+   *                         fields contains {@link Field#LAST_EVENT_ONLY} and
+   *                         not {@link Field#EVENTS}, the most recent event
+   *                         for each entity is retrieved.
+   * @return An {@link ATSEntity} object.
+   */
+  ATSEntity getEntity(String entity, String entityType, EnumSet<Field>
+      fieldsToRetrieve);
+
+  /**
+   * This method retrieves the events for a list of entities all of the same
+   * entity type. The events for each entity are sorted in order of their
+   * timestamps, descending.
+   *
+   * @param entityType The type of entities to retrieve events for.
+   * @param entityIds The entity IDs to retrieve events for.
+   * @param limit A limit on the number of events to return for each entity.
+   *              If null, defaults to  {@link #DEFAULT_LIMIT} events per
+   *              entity.
+   * @param windowStart If not null, retrieves only events later than the
+   *                    given time (exclusive)
+   * @param windowEnd If not null, retrieves only events earlier than the
+   *                  given time (inclusive)
+   * @param eventTypes Restricts the events returned to the given types. If
+   *                   null, events of all types will be returned.
+   * @return An {@link ATSEvents} object.
+   */
+  ATSEvents getEntityTimelines(String entityType,
+      SortedSet<String> entityIds, Long limit, Long windowStart,
+      Long windowEnd, Set<String> eventTypes);
+}

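For illustration only (not part of this patch): a sketch of a read through this interface, using the in-memory store added later in this patch as the concrete implementation and the standard Service lifecycle (init/start/stop) inherited from AbstractService; ATSEntities.getEntities() is assumed to return the entity list.

    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineReader.Field;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.MemoryApplicationTimelineStore;

    public class TimelineReadSketch {
      public static void main(String[] args) {
        ApplicationTimelineStore store = new MemoryApplicationTimelineStore();
        store.init(new Configuration());
        store.start();
        // Up to 10 entities of one type, newest start time first, returning
        // only their primary filters and other info.
        ATSEntities result = store.getEntities("APP_ATTEMPT", 10L, null, null,
            null, null, EnumSet.of(Field.PRIMARY_FILTERS, Field.OTHER_INFO));
        System.out.println(result.getEntities()); // empty until something is put
        store.stop();
      }
    }
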
+ 29 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineStore.java

@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.service.Service;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface ApplicationTimelineStore extends
+    Service, ApplicationTimelineReader, ApplicationTimelineWriter {
+}

+ 43 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineWriter.java

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
+
+/**
+ * This interface is for storing application timeline information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public interface ApplicationTimelineWriter {
+
+  /**
+   * Stores entity information to the application timeline store. Any errors
+   * occurring for individual put request objects will be reported in the
+   * response.
+   *
+   * @param data An {@link ATSEntities} object.
+   * @return An {@link ATSPutErrors} object.
+   */
+  ATSPutErrors put(ATSEntities data);
+
+}

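For illustration only (not part of this patch): a sketch of a put through this interface, again backed by the in-memory store; the ATSEntity setters and ATSEntities.setEntities call are taken from how MemoryApplicationTimelineStore uses them below.

    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
    import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
    import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
    import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.MemoryApplicationTimelineStore;

    public class TimelineWriteSketch {
      public static void main(String[] args) {
        MemoryApplicationTimelineStore store = new MemoryApplicationTimelineStore();
        store.init(new Configuration());
        store.start();

        ATSEntity entity = new ATSEntity();
        entity.setEntityId("attempt_1");
        entity.setEntityType("APP_ATTEMPT");
        entity.setStartTime(System.currentTimeMillis());

        ATSEntities batch = new ATSEntities();
        batch.setEntities(Arrays.asList(entity));

        // Per-entity failures come back in the response instead of an exception.
        ATSPutErrors errors = store.put(batch);
        System.out.println("put errors: " + errors.getErrors());
        store.stop();
      }
    }
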
+ 100 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/EntityId.java

@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * The unique identifier for an entity
+ */
+@Private
+@Unstable
+public class EntityId implements Comparable<EntityId> {
+
+  private String id;
+  private String type;
+
+  public EntityId(String id, String type) {
+    this.id = id;
+    this.type = type;
+  }
+
+  /**
+   * Get the entity Id.
+   * @return The entity Id.
+   */
+  public String getId() {
+    return id;
+  }
+
+  /**
+   * Get the entity type.
+   * @return The entity type.
+   */
+  public String getType() {
+    return type;
+  }
+
+  @Override
+  public int compareTo(EntityId other) {
+    int c = type.compareTo(other.type);
+    if (c != 0) return c;
+    return id.compareTo(other.id);
+  }
+
+  @Override
+  public int hashCode() {
+    // generated by eclipse
+    final int prime = 31;
+    int result = 1;
+    result = prime * result + ((id == null) ? 0 : id.hashCode());
+    result = prime * result + ((type == null) ? 0 : type.hashCode());
+    return result;
+  }
+
+  @Override
+  public boolean equals(Object obj) {
+    // generated by eclipse
+    if (this == obj)
+      return true;
+    if (obj == null)
+      return false;
+    if (getClass() != obj.getClass())
+      return false;
+    EntityId other = (EntityId) obj;
+    if (id == null) {
+      if (other.id != null)
+        return false;
+    } else if (!id.equals(other.id))
+      return false;
+    if (type == null) {
+      if (other.type != null)
+        return false;
+    } else if (!type.equals(other.type))
+      return false;
+    return true;
+  }
+
+  @Override
+  public String toString() {
+    return "{ id: " + id + ", type: "+ type + " }";
+  }
+
+}

+ 288 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/MemoryApplicationTimelineStore.java

@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.PriorityQueue;
+import java.util.Set;
+import java.util.SortedSet;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvent;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents.ATSEventsOfOneEntity;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors.ATSPutError;
+
+/**
+ * In-memory implementation of {@link ApplicationTimelineStore}. This
+ * implementation is for test purposes only. If it is instantiated more than
+ * once, readers and writers may end up using different in-memory stores and
+ * will not see each other's history data.
+ */
+@Private
+@Unstable
+public class MemoryApplicationTimelineStore
+    extends AbstractService implements ApplicationTimelineStore {
+
+  private Map<EntityId, ATSEntity> entities =
+      new HashMap<EntityId, ATSEntity>();
+
+  public MemoryApplicationTimelineStore() {
+    super(MemoryApplicationTimelineStore.class.getName());
+  }
+
+  @Override
+  public ATSEntities getEntities(String entityType, Long limit,
+      Long windowStart, Long windowEnd, NameValuePair primaryFilter,
+      Collection<NameValuePair> secondaryFilters, EnumSet<Field> fields) {
+    if (limit == null) {
+      limit = DEFAULT_LIMIT;
+    }
+    if (windowStart == null) {
+      windowStart = Long.MIN_VALUE;
+    }
+    if (windowEnd == null) {
+      windowEnd = Long.MAX_VALUE;
+    }
+    if (fields == null) {
+      fields = EnumSet.allOf(Field.class);
+    }
+    List<ATSEntity> entitiesSelected = new ArrayList<ATSEntity>();
+    for (ATSEntity entity : new PriorityQueue<ATSEntity>(entities.values())) {
+      if (entitiesSelected.size() >= limit) {
+        break;
+      }
+      if (!entity.getEntityType().equals(entityType)) {
+        continue;
+      }
+      if (entity.getStartTime() <= windowStart) {
+        continue;
+      }
+      if (entity.getStartTime() > windowEnd) {
+        continue;
+      }
+      if (primaryFilter != null &&
+          !matchFilter(entity.getPrimaryFilters(), primaryFilter)) {
+        continue;
+      }
+      if (secondaryFilters != null) { // OR logic
+        boolean flag = false;
+        for (NameValuePair secondaryFilter : secondaryFilters) {
+          if (secondaryFilter != null &&
+              matchFilter(entity.getOtherInfo(), secondaryFilter)) {
+            flag = true;
+            break;
+          }
+        }
+        if (!flag) {
+          continue;
+        }
+      }
+      entitiesSelected.add(entity);
+    }
+    List<ATSEntity> entitiesToReturn = new ArrayList<ATSEntity>();
+    for (ATSEntity entitySelected : entitiesSelected) {
+      entitiesToReturn.add(maskFields(entitySelected, fields));
+    }
+    Collections.sort(entitiesToReturn);
+    ATSEntities entitiesWrapper = new ATSEntities();
+    entitiesWrapper.setEntities(entitiesToReturn);
+    return entitiesWrapper;
+  }
+
+  @Override
+  public ATSEntity getEntity(String entityId, String entityType,
+      EnumSet<Field> fieldsToRetrieve) {
+    if (fieldsToRetrieve == null) {
+      fieldsToRetrieve = EnumSet.allOf(Field.class);
+    }
+    ATSEntity entity = entities.get(new EntityId(entityId, entityType));
+    if (entity == null) {
+      return null;
+    } else {
+      return maskFields(entity, fieldsToRetrieve);
+    }
+  }
+
+  @Override
+  public ATSEvents getEntityTimelines(String entityType,
+      SortedSet<String> entityIds, Long limit, Long windowStart,
+      Long windowEnd,
+      Set<String> eventTypes) {
+    ATSEvents allEvents = new ATSEvents();
+    if (entityIds == null) {
+      return allEvents;
+    }
+    if (limit == null) {
+      limit = DEFAULT_LIMIT;
+    }
+    if (windowStart == null) {
+      windowStart = Long.MIN_VALUE;
+    }
+    if (windowEnd == null) {
+      windowEnd = Long.MAX_VALUE;
+    }
+    for (String entityId : entityIds) {
+      EntityId entityID = new EntityId(entityId, entityType);
+      ATSEntity entity = entities.get(entityID);
+      if (entity == null) {
+        continue;
+      }
+      ATSEventsOfOneEntity events = new ATSEventsOfOneEntity();
+      events.setEntityId(entityId);
+      events.setEntityType(entityType);
+      for (ATSEvent event : entity.getEvents()) {
+        if (events.getEvents().size() >= limit) {
+          break;
+        }
+        if (event.getTimestamp() <= windowStart) {
+          continue;
+        }
+        if (event.getTimestamp() > windowEnd) {
+          continue;
+        }
+        if (eventTypes != null && !eventTypes.contains(event.getEventType())) {
+          continue;
+        }
+        events.addEvent(event);
+      }
+      allEvents.addEvent(events);
+    }
+    return allEvents;
+  }
+
+  @Override
+  public ATSPutErrors put(ATSEntities data) {
+    ATSPutErrors errors = new ATSPutErrors();
+    for (ATSEntity entity : data.getEntities()) {
+      EntityId entityId =
+          new EntityId(entity.getEntityId(), entity.getEntityType());
+      // store entity info in memory
+      ATSEntity existingEntity = entities.get(entityId);
+      if (existingEntity == null) {
+        existingEntity = new ATSEntity();
+        existingEntity.setEntityId(entity.getEntityId());
+        existingEntity.setEntityType(entity.getEntityType());
+        existingEntity.setStartTime(entity.getStartTime());
+        entities.put(entityId, existingEntity);
+      }
+      if (entity.getEvents() != null) {
+        if (existingEntity.getEvents() == null) {
+          existingEntity.setEvents(entity.getEvents());
+        } else {
+          existingEntity.addEvents(entity.getEvents());
+        }
+        Collections.sort(existingEntity.getEvents());
+      }
+      // check startTime
+      if (existingEntity.getStartTime() == null) {
+        if (existingEntity.getEvents() == null
+            || existingEntity.getEvents().isEmpty()) {
+          ATSPutError error = new ATSPutError();
+          error.setEntityId(entityId.getId());
+          error.setEntityType(entityId.getType());
+          error.setErrorCode(1);
+          errors.addError(error);
+          entities.remove(entityId);
+          continue;
+        } else {
+          existingEntity.setStartTime(entity.getEvents().get(0).getTimestamp());
+        }
+      }
+      if (entity.getPrimaryFilters() != null) {
+        if (existingEntity.getPrimaryFilters() == null) {
+          existingEntity.setPrimaryFilters(entity.getPrimaryFilters());
+        } else {
+          existingEntity.addPrimaryFilters(entity.getPrimaryFilters());
+        }
+      }
+      if (entity.getOtherInfo() != null) {
+        if (existingEntity.getOtherInfo() == null) {
+          existingEntity.setOtherInfo(entity.getOtherInfo());
+        } else {
+          existingEntity.addOtherInfo(entity.getOtherInfo());
+        }
+      }
+      // relate it to other entities
+      if (entity.getRelatedEntities() == null) {
+        continue;
+      }
+      for (Map.Entry<String, List<String>> partRelatedEntities : entity
+          .getRelatedEntities().entrySet()) {
+        if (partRelatedEntities == null) {
+          continue;
+        }
+        for (String idStr : partRelatedEntities.getValue()) {
+          EntityId relatedEntityId =
+              new EntityId(idStr, partRelatedEntities.getKey());
+          ATSEntity relatedEntity = entities.get(relatedEntityId);
+          if (relatedEntity != null) {
+            relatedEntity.addRelatedEntity(
+                existingEntity.getEntityType(), existingEntity.getEntityId());
+          }
+        }
+      }
+    }
+    return errors;
+  }
+
+  private static ATSEntity maskFields(
+      ATSEntity entity, EnumSet<Field> fields) {
+    // Conceal the fields that are not going to be exposed
+    ATSEntity entityToReturn = new ATSEntity();
+    entityToReturn.setEntityId(entity.getEntityId());
+    entityToReturn.setEntityType(entity.getEntityType());
+    entityToReturn.setStartTime(entity.getStartTime());
+    entityToReturn.setEvents(fields.contains(Field.EVENTS) ?
+        entity.getEvents() : fields.contains(Field.LAST_EVENT_ONLY) ?
+            Arrays.asList(entity.getEvents().get(0)) : null);
+    entityToReturn.setRelatedEntities(fields.contains(Field.RELATED_ENTITIES) ?
+        entity.getRelatedEntities() : null);
+    entityToReturn.setPrimaryFilters(fields.contains(Field.PRIMARY_FILTERS) ?
+        entity.getPrimaryFilters() : null);
+    entityToReturn.setOtherInfo(fields.contains(Field.OTHER_INFO) ?
+        entity.getOtherInfo() : null);
+    return entityToReturn;
+  }
+
+  private static boolean matchFilter(Map<String, Object> tags,
+      NameValuePair filter) {
+    Object value = tags.get(filter.getName());
+    if (value == null) { // doesn't have the filter
+      return false;
+    } else if (!value.equals(filter.getValue())) { // doesn't match the filter
+      return false;
+    }
+    return true;
+  }
+
+}
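
For reference, a minimal sketch of driving the store above directly, assuming the same imports the test classes later in this patch use (the "test id"/"test type" values are arbitrary):

    MemoryApplicationTimelineStore store = new MemoryApplicationTimelineStore();
    store.init(new YarnConfiguration());
    store.start();

    // write one entity
    ATSEntity entity = new ATSEntity();
    entity.setEntityId("test id");
    entity.setEntityType("test type");
    entity.setStartTime(System.currentTimeMillis());
    ATSEntities batch = new ATSEntities();
    batch.addEntity(entity);
    ATSPutErrors errors = store.put(batch);   // expect an empty error list

    // read it back with all fields retrieved
    ATSEntity stored =
        store.getEntity("test id", "test type", EnumSet.allOf(Field.class));

    store.stop();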

+ 59 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/NameValuePair.java

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * A class holding a name and value pair, used for specifying filters in
+ * {@link ApplicationTimelineReader}.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class NameValuePair {
+  String name;
+  Object value;
+
+  public NameValuePair(String name, Object value) {
+    this.name = name;
+    this.value = value;
+  }
+
+  /**
+   * Get the name.
+   * @return The name.
+   */
+  public String getName() {
+
+    return name;
+  }
+
+  /**
+   * Get the value.
+   * @return The value.
+   */
+  public Object getValue() {
+    return value;
+  }
+
+  @Override
+  public String toString() {
+    return "{ name: " + name + ", value: " + value + " }";
+  }
+}
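
For reference, a short sketch of how a NameValuePair is used as a primary filter when querying a store; "store" is assumed to be an initialized ApplicationTimelineStore, as in the test utilities later in this patch:

    // select only type_1 entities whose primary filters contain user=username
    NameValuePair userFilter = new NameValuePair("user", "username");
    ATSEntities result = store.getEntities("type_1", null, null, null,
        userFilter, null, EnumSet.allOf(Field.class));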

+ 20 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/package-info.java

@@ -0,0 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
+import org.apache.hadoop.classification.InterfaceAudience;

+ 9 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/AHSWebApp.java

@@ -21,24 +21,31 @@ import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
 
 import org.apache.hadoop.yarn.server.api.ApplicationContext;
 import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryManager;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
 import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
 import org.apache.hadoop.yarn.webapp.YarnWebParams;
 
 public class AHSWebApp extends WebApp implements YarnWebParams {
 
   private final ApplicationHistoryManager applicationHistoryManager;
+  private final ApplicationTimelineStore applicationTimelineStore;
 
-  public AHSWebApp(ApplicationHistoryManager applicationHistoryManager) {
+  public AHSWebApp(ApplicationHistoryManager applicationHistoryManager,
+      ApplicationTimelineStore applicationTimelineStore) {
     this.applicationHistoryManager = applicationHistoryManager;
+    this.applicationTimelineStore = applicationTimelineStore;
   }
 
   @Override
   public void setup() {
-    bind(JAXBContextResolver.class);
+    bind(YarnJacksonJaxbJsonProvider.class);
     bind(AHSWebServices.class);
+    bind(ATSWebServices.class);
     bind(GenericExceptionHandler.class);
     bind(ApplicationContext.class).toInstance(applicationHistoryManager);
+    bind(ApplicationTimelineStore.class).toInstance(applicationTimelineStore);
     route("/", AHSController.class);
     route(pajoin("/apps", APP_STATE), AHSController.class);
     route(pajoin("/app", APPLICATION_ID), AHSController.class, "app");

+ 297 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/ATSWebServices.java

@@ -0,0 +1,297 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.Consumes;
+import javax.ws.rs.GET;
+import javax.ws.rs.POST;
+import javax.ws.rs.Path;
+import javax.ws.rs.PathParam;
+import javax.ws.rs.Produces;
+import javax.ws.rs.QueryParam;
+import javax.ws.rs.WebApplicationException;
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MediaType;
+import javax.ws.rs.core.Response;
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineReader.Field;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.NameValuePair;
+import org.apache.hadoop.yarn.webapp.BadRequestException;
+
+import com.google.inject.Inject;
+import com.google.inject.Singleton;
+
+@Singleton
+@Path("/ws/v1/apptimeline")
+//TODO: support XML serialization/deserialization
+public class ATSWebServices {
+
+  private ApplicationTimelineStore store;
+
+  @Inject
+  public ATSWebServices(ApplicationTimelineStore store) {
+    this.store = store;
+  }
+
+  @XmlRootElement(name = "about")
+  @XmlAccessorType(XmlAccessType.NONE)
+  @Public
+  @Unstable
+  public static class AboutInfo {
+
+    private String about;
+
+    public AboutInfo() {
+
+    }
+
+    public AboutInfo(String about) {
+      this.about = about;
+    }
+
+    @XmlElement(name = "About")
+    public String getAbout() {
+      return about;
+    }
+
+    public void setAbout(String about) {
+      this.about = about;
+    }
+
+  }
+
+  /**
+   * Return the description of the application timeline web services.
+   */
+  @GET
+  @Path("/")
+  @Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public AboutInfo about(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res) {
+    init(res);
+    return new AboutInfo("Application Timeline API");
+  }
+
+  /**
+   * Return a list of entities that match the given parameters.
+   */
+  @GET
+  @Path("/{entityType}")
+  @Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public ATSEntities getEntities(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("entityType") String entityType,
+      @QueryParam("primaryFilter") String primaryFilter,
+      @QueryParam("secondaryFilter") String secondaryFilter,
+      @QueryParam("windowStart") String windowStart,
+      @QueryParam("windowEnd") String windowEnd,
+      @QueryParam("limit") String limit,
+      @QueryParam("fields") String fields) {
+    init(res);
+    ATSEntities entities = null;
+    try {
+      entities = store.getEntities(
+          parseStr(entityType),
+          parseLongStr(limit),
+          parseLongStr(windowStart),
+          parseLongStr(windowEnd),
+          parsePairStr(primaryFilter, ":"),
+          parsePairsStr(secondaryFilter, ",", ":"),
+          parseFieldsStr(fields, ","));
+    } catch (NumberFormatException e) {
+      throw new BadRequestException(
+          "windowStart, windowEnd or limit is not a numeric value.");
+    } catch (IllegalArgumentException e) {
+      throw new BadRequestException("requested invalid field.");
+    }
+    if (entities == null) {
+      return new ATSEntities();
+    }
+    return entities;
+  }
+
+  /**
+   * Return a single entity of the given entity type and Id.
+   */
+  @GET
+  @Path("/{entityType}/{entityId}")
+  @Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public ATSEntity getEntity(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("entityType") String entityType,
+      @PathParam("entityId") String entityId,
+      @QueryParam("fields") String fields) {
+    init(res);
+    ATSEntity entity = null;
+    try {
+      entity =
+          store.getEntity(parseStr(entityId), parseStr(entityType),
+              parseFieldsStr(fields, ","));
+    } catch (IllegalArgumentException e) {
+      throw new BadRequestException(
+          "requested invalid field.");
+    }
+    if (entity == null) {
+      throw new WebApplicationException(Response.Status.NOT_FOUND);
+    }
+    return entity;
+  }
+
+  /**
+   * Return the events that match the given parameters.
+   */
+  @GET
+  @Path("/{entityType}/events")
+  @Produces({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public ATSEvents getEvents(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      @PathParam("entityType") String entityType,
+      @QueryParam("entityId") String entityId,
+      @QueryParam("eventType") String eventType,
+      @QueryParam("windowStart") String windowStart,
+      @QueryParam("windowEnd") String windowEnd,
+      @QueryParam("limit") String limit) {
+    init(res);
+    ATSEvents events = null;
+    try {
+      events = store.getEntityTimelines(
+          parseStr(entityType),
+          parseArrayStr(entityId, ","),
+          parseLongStr(limit),
+          parseLongStr(windowStart),
+          parseLongStr(windowEnd),
+          parseArrayStr(eventType, ","));
+    } catch (NumberFormatException e) {
+      throw new BadRequestException(
+          "windowStart, windowEnd or limit is not a numeric value.");
+    }
+    if (events == null) {
+      return new ATSEvents();
+    }
+    return events;
+  }
+
+  /**
+   * Store the given entities in the timeline store, and return any errors
+   * that occur while storing them.
+   */
+  @POST
+  @Path("/")
+  @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */})
+  public ATSPutErrors postEntities(
+      @Context HttpServletRequest req,
+      @Context HttpServletResponse res,
+      ATSEntities entities) {
+    init(res);
+    if (entities == null) {
+      return new ATSPutErrors();
+    }
+    return store.put(entities);
+  }
+
+  private void init(HttpServletResponse response) {
+    response.setContentType(null);
+  }
+
+  private static SortedSet<String> parseArrayStr(String str, String delimiter) {
+    if (str == null) {
+      return null;
+    }
+    SortedSet<String> strSet = new TreeSet<String>();
+    String[] strs = str.split(delimiter);
+    for (String aStr : strs) {
+      strSet.add(aStr.trim());
+    }
+    return strSet;
+  }
+
+  private static NameValuePair parsePairStr(String str, String delimiter) {
+    if (str == null) {
+      return null;
+    }
+    String[] strs = str.split(delimiter, 2);
+    return new NameValuePair(strs[0].trim(), strs[1].trim());
+  }
+
+  private static Collection<NameValuePair> parsePairsStr(
+      String str, String aDelimiter, String pDelimiter) {
+    if (str == null) {
+      return null;
+    }
+    String[] strs = str.split(aDelimiter);
+    Set<NameValuePair> pairs = new HashSet<NameValuePair>();
+    for (String aStr : strs) {
+      pairs.add(parsePairStr(aStr, pDelimiter));
+    }
+    return pairs;
+  }
+
+  private static EnumSet<Field> parseFieldsStr(String str, String delimiter) {
+    if (str == null) {
+      return null;
+    }
+    String[] strs = str.split(delimiter);
+    List<Field> fieldList = new ArrayList<Field>();
+    for (String s : strs) {
+      fieldList.add(Field.valueOf(s.toUpperCase()));
+    }
+    if (fieldList.size() == 0)
+      return null;
+    Field f1 = fieldList.remove(fieldList.size() - 1);
+    if (fieldList.size() == 0)
+      return EnumSet.of(f1);
+    else
+      return EnumSet.of(f1, fieldList.toArray(new Field[fieldList.size()]));
+  }
+
+  private static Long parseLongStr(String str) {
+    return str == null ? null : Long.parseLong(str.trim());
+  }
+
+  private static String parseStr(String str) {
+    return str == null ? null : str.trim();
+  }
+
+}
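
For reference, a sketch of querying these endpoints with the Jersey client the tests below use; resource() and the JSON client config come from the JerseyTest setup there, and the query-string formats follow the parse helpers above (primaryFilter uses "name:value", fields is a comma-separated list of Field names):

    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("apptimeline")
        .path("type_1")
        .queryParam("primaryFilter", "user:username")   // parsed by parsePairStr(..., ":")
        .queryParam("fields", "EVENTS,OTHER_INFO")      // parsed by parseFieldsStr(..., ",")
        .queryParam("limit", "10")                      // parsed by parseLongStr(...)
        .accept(MediaType.APPLICATION_JSON)
        .get(ClientResponse.class);
    ATSEntities entities = response.getEntity(ATSEntities.class);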

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryServer.java

@@ -40,7 +40,7 @@ public class TestApplicationHistoryServer {
     Configuration config = new YarnConfiguration();
     historyServer.init(config);
     assertEquals(STATE.INITED, historyServer.getServiceState());
-    assertEquals(2, historyServer.getServices().size());
+    assertEquals(3, historyServer.getServices().size());
     ApplicationHistoryClientService historyService =
         historyServer.getClientService();
     assertNotNull(historyServer.getClientService());

+ 532 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/ApplicationTimelineStoreTestUtils.java

@@ -0,0 +1,532 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.SortedSet;
+import java.util.TreeSet;
+
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvent;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents.ATSEventsOfOneEntity;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors.ATSPutError;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineReader.Field;
+
+public class ApplicationTimelineStoreTestUtils {
+
+  private static final Map<String, Object> EMPTY_MAP = Collections.emptyMap();
+  private static final Map<String, List<String>> EMPTY_REL_ENTITIES =
+      new HashMap<String, List<String>>();
+
+  protected ApplicationTimelineStore store;
+  private String entity1;
+  private String entityType1;
+  private String entity1b;
+  private String entity2;
+  private String entityType2;
+  private Map<String, Object> primaryFilters;
+  private Map<String, Object> secondaryFilters;
+  private Map<String, Object> allFilters;
+  private Map<String, Object> otherInfo;
+  private Map<String, List<String>> relEntityMap;
+  private NameValuePair userFilter;
+  private Collection<NameValuePair> goodTestingFilters;
+  private Collection<NameValuePair> badTestingFilters;
+  private ATSEvent ev1;
+  private ATSEvent ev2;
+  private ATSEvent ev3;
+  private ATSEvent ev4;
+  private Map<String, Object> eventInfo;
+  private List<ATSEvent> events1;
+  private List<ATSEvent> events2;
+
+  /**
+   * Load test data into the store under test
+   */
+  protected void loadTestData() {
+    ATSEntities atsEntities = new ATSEntities();
+    Map<String, Object> primaryFilters = new HashMap<String, Object>();
+    primaryFilters.put("user", "username");
+    primaryFilters.put("appname", 12345l);
+    Map<String, Object> secondaryFilters = new HashMap<String, Object>();
+    secondaryFilters.put("startTime", 123456l);
+    secondaryFilters.put("status", "RUNNING");
+    Map<String, Object> otherInfo1 = new HashMap<String, Object>();
+    otherInfo1.put("info1", "val1");
+    otherInfo1.putAll(secondaryFilters);
+
+    String entity1 = "id_1";
+    String entityType1 = "type_1";
+    String entity1b = "id_2";
+    String entity2 = "id_2";
+    String entityType2 = "type_2";
+
+    Map<String, List<String>> relatedEntities =
+        new HashMap<String, List<String>>();
+    relatedEntities.put(entityType2, Collections.singletonList(entity2));
+
+    ATSEvent ev3 = createEvent(789l, "launch_event", null);
+    ATSEvent ev4 = createEvent(-123l, "init_event", null);
+    List<ATSEvent> events = new ArrayList<ATSEvent>();
+    events.add(ev3);
+    events.add(ev4);
+    atsEntities.setEntities(Collections.singletonList(createEntity(entity2,
+        entityType2, null, events, null, null, null)));
+    ATSPutErrors response = store.put(atsEntities);
+    assertEquals(0, response.getErrors().size());
+
+    ATSEvent ev1 = createEvent(123l, "start_event", null);
+    atsEntities.setEntities(Collections.singletonList(createEntity(entity1,
+        entityType1, 123l, Collections.singletonList(ev1),
+        relatedEntities, primaryFilters, otherInfo1)));
+    response = store.put(atsEntities);
+    assertEquals(0, response.getErrors().size());
+    atsEntities.setEntities(Collections.singletonList(createEntity(entity1b,
+        entityType1, null, Collections.singletonList(ev1), relatedEntities,
+        primaryFilters, otherInfo1)));
+    response = store.put(atsEntities);
+    assertEquals(0, response.getErrors().size());
+
+    Map<String, Object> eventInfo = new HashMap<String, Object>();
+    eventInfo.put("event info 1", "val1");
+    ATSEvent ev2 = createEvent(456l, "end_event", eventInfo);
+    Map<String, Object> otherInfo2 = new HashMap<String, Object>();
+    otherInfo2.put("info2", "val2");
+    atsEntities.setEntities(Collections.singletonList(createEntity(entity1,
+        entityType1, null, Collections.singletonList(ev2), null,
+        primaryFilters, otherInfo2)));
+    response = store.put(atsEntities);
+    assertEquals(0, response.getErrors().size());
+    atsEntities.setEntities(Collections.singletonList(createEntity(entity1b,
+        entityType1, 123l, Collections.singletonList(ev2), null,
+        primaryFilters, otherInfo2)));
+    response = store.put(atsEntities);
+    assertEquals(0, response.getErrors().size());
+
+    atsEntities.setEntities(Collections.singletonList(createEntity(
+        "badentityid", "badentity", null, null, null, null, otherInfo1)));
+    response = store.put(atsEntities);
+    assertEquals(1, response.getErrors().size());
+    ATSPutError error = response.getErrors().get(0);
+    assertEquals("badentityid", error.getEntityId());
+    assertEquals("badentity", error.getEntityType());
+    assertEquals((Integer) 1, error.getErrorCode());
+  }
+
+  /**
+   * Load verification data
+   */
+  protected void loadVerificationData() throws Exception {
+    userFilter = new NameValuePair("user",
+        "username");
+    goodTestingFilters = new ArrayList<NameValuePair>();
+    goodTestingFilters.add(new NameValuePair("appname", 12345l));
+    goodTestingFilters.add(new NameValuePair("status", "RUNNING"));
+    badTestingFilters = new ArrayList<NameValuePair>();
+    badTestingFilters.add(new NameValuePair("appname", 12345l));
+    badTestingFilters.add(new NameValuePair("status", "FINISHED"));
+
+    primaryFilters = new HashMap<String, Object>();
+    primaryFilters.put("user", "username");
+    primaryFilters.put("appname", 12345l);
+    secondaryFilters = new HashMap<String, Object>();
+    secondaryFilters.put("startTime", 123456l);
+    secondaryFilters.put("status", "RUNNING");
+    allFilters = new HashMap<String, Object>();
+    allFilters.putAll(secondaryFilters);
+    allFilters.putAll(primaryFilters);
+    otherInfo = new HashMap<String, Object>();
+    otherInfo.put("info1", "val1");
+    otherInfo.put("info2", "val2");
+    otherInfo.putAll(secondaryFilters);
+
+    entity1 = "id_1";
+    entityType1 = "type_1";
+    entity1b = "id_2";
+    entity2 = "id_2";
+    entityType2 = "type_2";
+
+    ev1 = createEvent(123l, "start_event", null);
+
+    eventInfo = new HashMap<String, Object>();
+    eventInfo.put("event info 1", "val1");
+    ev2 = createEvent(456l, "end_event", eventInfo);
+    events1 = new ArrayList<ATSEvent>();
+    events1.add(ev2);
+    events1.add(ev1);
+
+    relEntityMap =
+        new HashMap<String, List<String>>();
+    List<String> ids = new ArrayList<String>();
+    ids.add(entity1);
+    ids.add(entity1b);
+    relEntityMap.put(entityType1, ids);
+
+    ev3 = createEvent(789l, "launch_event", null);
+    ev4 = createEvent(-123l, "init_event", null);
+    events2 = new ArrayList<ATSEvent>();
+    events2.add(ev3);
+    events2.add(ev4);
+  }
+
+  public void testGetSingleEntity() {
+    // test getting entity info
+    verifyEntityInfo(null, null, null, null, null, null,
+        store.getEntity("id_1", "type_2", EnumSet.allOf(Field.class)));
+
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, store.getEntity(entity1, entityType1,
+        EnumSet.allOf(Field.class)));
+
+    verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, store.getEntity(entity1b, entityType1,
+        EnumSet.allOf(Field.class)));
+
+    verifyEntityInfo(entity2, entityType2, events2, relEntityMap, EMPTY_MAP,
+        EMPTY_MAP, store.getEntity(entity2, entityType2,
+        EnumSet.allOf(Field.class)));
+
+    // test getting single fields
+    verifyEntityInfo(entity1, entityType1, events1, null, null, null,
+        store.getEntity(entity1, entityType1, EnumSet.of(Field.EVENTS)));
+
+    verifyEntityInfo(entity1, entityType1, Collections.singletonList(ev2),
+        null, null, null, store.getEntity(entity1, entityType1,
+        EnumSet.of(Field.LAST_EVENT_ONLY)));
+
+    verifyEntityInfo(entity1, entityType1, null, null, primaryFilters, null,
+        store.getEntity(entity1, entityType1,
+            EnumSet.of(Field.PRIMARY_FILTERS)));
+
+    verifyEntityInfo(entity1, entityType1, null, null, null, otherInfo,
+        store.getEntity(entity1, entityType1, EnumSet.of(Field.OTHER_INFO)));
+
+    verifyEntityInfo(entity2, entityType2, null, relEntityMap, null, null,
+        store.getEntity(entity2, entityType2,
+            EnumSet.of(Field.RELATED_ENTITIES)));
+  }
+
+  public void testGetEntities() {
+    // test getting entities
+    assertEquals("nonzero entities size for nonexistent type", 0,
+        store.getEntities("type_0", null, null, null, null, null,
+            null).getEntities().size());
+    assertEquals("nonzero entities size for nonexistent type", 0,
+        store.getEntities("type_3", null, null, null, null, null,
+            null).getEntities().size());
+    assertEquals("nonzero entities size for nonexistent type", 0,
+        store.getEntities("type_0", null, null, null, userFilter,
+            null, null).getEntities().size());
+    assertEquals("nonzero entities size for nonexistent type", 0,
+        store.getEntities("type_3", null, null, null, userFilter,
+            null, null).getEntities().size());
+
+    List<ATSEntity> entities =
+        store.getEntities("type_1", null, null, null, null, null,
+            EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = store.getEntities("type_2", null, null, null, null, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entity2, entityType2, events2, relEntityMap, EMPTY_MAP,
+        EMPTY_MAP, entities.get(0));
+
+    entities = store.getEntities("type_1", 1l, null, null, null, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+
+    entities = store.getEntities("type_1", 1l, 0l, null, null, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+
+    entities = store.getEntities("type_1", null, 234l, null, null, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(0, entities.size());
+
+    entities = store.getEntities("type_1", null, 123l, null, null, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(0, entities.size());
+
+    entities = store.getEntities("type_1", null, 234l, 345l, null, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(0, entities.size());
+
+    entities = store.getEntities("type_1", null, null, 345l, null, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = store.getEntities("type_1", null, null, 123l, null, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+  }
+
+  public void testGetEntitiesWithPrimaryFilters() {
+    // test using primary filter
+    assertEquals("nonzero entities size for primary filter", 0,
+        store.getEntities("type_1", null, null, null,
+            new NameValuePair("none", "none"), null,
+            EnumSet.allOf(Field.class)).getEntities().size());
+    assertEquals("nonzero entities size for primary filter", 0,
+        store.getEntities("type_2", null, null, null,
+            new NameValuePair("none", "none"), null,
+            EnumSet.allOf(Field.class)).getEntities().size());
+    assertEquals("nonzero entities size for primary filter", 0,
+        store.getEntities("type_3", null, null, null,
+            new NameValuePair("none", "none"), null,
+            EnumSet.allOf(Field.class)).getEntities().size());
+
+    List<ATSEntity> entities = store.getEntities("type_1", null, null, null,
+        userFilter, null, EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = store.getEntities("type_2", null, null, null, userFilter, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(0, entities.size());
+
+    entities = store.getEntities("type_1", 1l, null, null, userFilter, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+
+    entities = store.getEntities("type_1", 1l, 0l, null, userFilter, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(1, entities.size());
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+
+    entities = store.getEntities("type_1", null, 234l, null, userFilter, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(0, entities.size());
+
+    entities = store.getEntities("type_1", null, 234l, 345l, userFilter, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(0, entities.size());
+
+    entities = store.getEntities("type_1", null, null, 345l, userFilter, null,
+        EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+  }
+
+  public void testGetEntitiesWithSecondaryFilters() {
+    // test using secondary filter
+    List<ATSEntity> entities = store.getEntities("type_1", null, null, null,
+        null, goodTestingFilters, EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = store.getEntities("type_1", null, null, null, userFilter,
+        goodTestingFilters, EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(2, entities.size());
+    verifyEntityInfo(entity1, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(0));
+    verifyEntityInfo(entity1b, entityType1, events1, EMPTY_REL_ENTITIES,
+        primaryFilters, otherInfo, entities.get(1));
+
+    entities = store.getEntities("type_1", null, null, null, null,
+        badTestingFilters, EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(0, entities.size());
+
+    entities = store.getEntities("type_1", null, null, null, userFilter,
+        badTestingFilters, EnumSet.allOf(Field.class)).getEntities();
+    assertEquals(0, entities.size());
+  }
+
+  public void testGetEvents() {
+    // test getting entity timelines
+    SortedSet<String> sortedSet = new TreeSet<String>();
+    sortedSet.add(entity1);
+    List<ATSEventsOfOneEntity> timelines =
+        store.getEntityTimelines(entityType1, sortedSet, null, null,
+            null, null).getAllEvents();
+    assertEquals(1, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2, ev1);
+
+    sortedSet.add(entity1b);
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        null, null, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2, ev1);
+    verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev2, ev1);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, 1l,
+        null, null, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2);
+    verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev2);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        345l, null, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2);
+    verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev2);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        123l, null, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2);
+    verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev2);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        null, 345l, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev1);
+    verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev1);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        null, 123l, null).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev1);
+    verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev1);
+
+    timelines = store.getEntityTimelines(entityType1, sortedSet, null,
+        null, null, Collections.singleton("end_event")).getAllEvents();
+    assertEquals(2, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entity1, entityType1, ev2);
+    verifyEntityTimeline(timelines.get(1), entity1b, entityType1, ev2);
+
+    sortedSet.add(entity2);
+    timelines = store.getEntityTimelines(entityType2, sortedSet, null,
+        null, null, null).getAllEvents();
+    assertEquals(1, timelines.size());
+    verifyEntityTimeline(timelines.get(0), entity2, entityType2, ev3, ev4);
+  }
+
+  /**
+   * Verify a single entity
+   */
+  private static void verifyEntityInfo(String entity, String entityType,
+      List<ATSEvent> events, Map<String, List<String>> relatedEntities,
+      Map<String, Object> primaryFilters, Map<String, Object> otherInfo,
+      ATSEntity retrievedEntityInfo) {
+    if (entity == null) {
+      assertNull(retrievedEntityInfo);
+      return;
+    }
+    assertEquals(entity, retrievedEntityInfo.getEntityId());
+    assertEquals(entityType, retrievedEntityInfo.getEntityType());
+    if (events == null)
+      assertNull(retrievedEntityInfo.getEvents());
+    else
+      assertEquals(events, retrievedEntityInfo.getEvents());
+    if (relatedEntities == null)
+      assertNull(retrievedEntityInfo.getRelatedEntities());
+    else
+      assertEquals(relatedEntities, retrievedEntityInfo.getRelatedEntities());
+    if (primaryFilters == null)
+      assertNull(retrievedEntityInfo.getPrimaryFilters());
+    else
+      assertTrue(primaryFilters.equals(
+          retrievedEntityInfo.getPrimaryFilters()));
+    if (otherInfo == null)
+      assertNull(retrievedEntityInfo.getOtherInfo());
+    else
+      assertTrue(otherInfo.equals(retrievedEntityInfo.getOtherInfo()));
+  }
+
+  /**
+   * Verify timeline events
+   */
+  private static void verifyEntityTimeline(
+      ATSEventsOfOneEntity retrievedEvents, String entity, String entityType,
+      ATSEvent... actualEvents) {
+    assertEquals(entity, retrievedEvents.getEntityId());
+    assertEquals(entityType, retrievedEvents.getEntityType());
+    assertEquals(actualEvents.length, retrievedEvents.getEvents().size());
+    for (int i = 0; i < actualEvents.length; i++) {
+      assertEquals(actualEvents[i], retrievedEvents.getEvents().get(i));
+    }
+  }
+
+  /**
+   * Create a test entity
+   */
+  private static ATSEntity createEntity(String entity, String entityType,
+      Long startTime, List<ATSEvent> events,
+      Map<String, List<String>> relatedEntities,
+      Map<String, Object> primaryFilters, Map<String, Object> otherInfo) {
+    ATSEntity atsEntity = new ATSEntity();
+    atsEntity.setEntityId(entity);
+    atsEntity.setEntityType(entityType);
+    atsEntity.setStartTime(startTime);
+    atsEntity.setEvents(events);
+    if (relatedEntities != null)
+      for (Entry<String, List<String>> e : relatedEntities.entrySet())
+        for (String v : e.getValue())
+          atsEntity.addRelatedEntity(e.getKey(), v);
+    else
+      atsEntity.setRelatedEntities(null);
+    atsEntity.setPrimaryFilters(primaryFilters);
+    atsEntity.setOtherInfo(otherInfo);
+    return atsEntity;
+  }
+
+  /**
+   * Create a test event
+   */
+  private static ATSEvent createEvent(long timestamp, String type, Map<String,
+      Object> info) {
+    ATSEvent event = new ATSEvent();
+    event.setTimestamp(timestamp);
+    event.setEventType(type);
+    event.setEventInfo(info);
+    return event;
+  }
+
+}

+ 73 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/apptimeline/TestMemoryApplicationTimelineStore.java

@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline;
+
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+
+public class TestMemoryApplicationTimelineStore
+    extends ApplicationTimelineStoreTestUtils {
+
+  @Before
+  public void setup() throws Exception {
+    store = new MemoryApplicationTimelineStore();
+    store.init(new YarnConfiguration());
+    store.start();
+    loadTestData();
+    loadVerificationData();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    store.stop();
+  }
+
+  public ApplicationTimelineStore getApplicationTimelineStore() {
+    return store;
+  }
+
+  @Test
+  public void testGetSingleEntity() {
+    super.testGetSingleEntity();
+  }
+
+  @Test
+  public void testGetEntities() {
+    super.testGetEntities();
+  }
+
+  @Test
+  public void testGetEntitiesWithPrimaryFilters() {
+    super.testGetEntitiesWithPrimaryFilters();
+  }
+
+  @Test
+  public void testGetEntitiesWithSecondaryFilters() {
+    super.testGetEntitiesWithSecondaryFilters();
+  }
+
+  @Test
+  public void testGetEvents() {
+    super.testGetEvents();
+  }
+
+}

+ 212 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/webapp/TestATSWebServices.java

@@ -0,0 +1,212 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.applicationhistoryservice.webapp;
+
+import static org.junit.Assert.assertEquals;
+
+import javax.ws.rs.core.MediaType;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntities;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEntity;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvent;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSEvents;
+import org.apache.hadoop.yarn.api.records.apptimeline.ATSPutErrors;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.ApplicationTimelineStore;
+import org.apache.hadoop.yarn.server.applicationhistoryservice.apptimeline.TestMemoryApplicationTimelineStore;
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
+import org.apache.hadoop.yarn.webapp.YarnJacksonJaxbJsonProvider;
+import org.junit.Test;
+
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.servlet.GuiceServletContextListener;
+import com.google.inject.servlet.ServletModule;
+import com.sun.jersey.api.client.ClientResponse;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
+import com.sun.jersey.test.framework.JerseyTest;
+import com.sun.jersey.test.framework.WebAppDescriptor;
+
+
+public class TestATSWebServices extends JerseyTest {
+
+  private static ApplicationTimelineStore store;
+
+  private Injector injector = Guice.createInjector(new ServletModule() {
+
+    @Override
+    protected void configureServlets() {
+      bind(YarnJacksonJaxbJsonProvider.class);
+      bind(ATSWebServices.class);
+      bind(GenericExceptionHandler.class);
+      try{
+        store = mockApplicationTimelineStore();
+      } catch (Exception e) {
+        Assert.fail();
+      }
+      bind(ApplicationTimelineStore.class).toInstance(store);
+      serve("/*").with(GuiceContainer.class);
+    }
+
+  });
+
+  public class GuiceServletConfig extends GuiceServletContextListener {
+
+    @Override
+    protected Injector getInjector() {
+      return injector;
+    }
+  }
+
+  private ApplicationTimelineStore mockApplicationTimelineStore()
+      throws Exception {
+    TestMemoryApplicationTimelineStore store =
+        new TestMemoryApplicationTimelineStore();
+    store.setup();
+    return store.getApplicationTimelineStore();
+  }
+
+  public TestATSWebServices() {
+    super(new WebAppDescriptor.Builder(
+        "org.apache.hadoop.yarn.server.applicationhistoryservice.webapp")
+        .contextListenerClass(GuiceServletConfig.class)
+        .filterClass(com.google.inject.servlet.GuiceFilter.class)
+        .contextPath("jersey-guice-filter")
+        .servletPath("/")
+        .clientConfig(new DefaultClientConfig(YarnJacksonJaxbJsonProvider.class))
+        .build());
+  }
+
+  @Test
+  public void testAbout() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("apptimeline")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    ATSWebServices.AboutInfo about =
+        response.getEntity(ATSWebServices.AboutInfo.class);
+    Assert.assertNotNull(about);
+    Assert.assertEquals("Application Timeline API", about.getAbout());
+  }
+
+  @Test
+  public void testGetEntities() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("apptimeline")
+        .path("type_1")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    ATSEntities entities = response.getEntity(ATSEntities.class);
+    Assert.assertNotNull(entities);
+    Assert.assertEquals(2, entities.getEntities().size());
+    ATSEntity entity1 = entities.getEntities().get(0);
+    Assert.assertNotNull(entity1);
+    Assert.assertEquals("id_1", entity1.getEntityId());
+    Assert.assertEquals("type_1", entity1.getEntityType());
+    Assert.assertEquals(123l, entity1.getStartTime().longValue());
+    Assert.assertEquals(2, entity1.getEvents().size());
+    Assert.assertEquals(2, entity1.getPrimaryFilters().size());
+    Assert.assertEquals(4, entity1.getOtherInfo().size());
+    ATSEntity entity2 = entities.getEntities().get(1);
+    Assert.assertNotNull(entity2);
+    Assert.assertEquals("id_2", entity2.getEntityId());
+    Assert.assertEquals("type_1", entity2.getEntityType());
+    Assert.assertEquals(123l, entity2.getStartTime().longValue());
+    Assert.assertEquals(2, entity2.getEvents().size());
+    Assert.assertEquals(2, entity2.getPrimaryFilters().size());
+    Assert.assertEquals(4, entity2.getOtherInfo().size());
+  }
+
+  @Test
+  public void testGetEntity() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("apptimeline")
+        .path("type_1").path("id_1")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    ATSEntity entity = response.getEntity(ATSEntity.class);
+    Assert.assertNotNull(entity);
+    Assert.assertEquals("id_1", entity.getEntityId());
+    Assert.assertEquals("type_1", entity.getEntityType());
+    Assert.assertEquals(123l, entity.getStartTime().longValue());
+    Assert.assertEquals(2, entity.getEvents().size());
+    Assert.assertEquals(2, entity.getPrimaryFilters().size());
+    Assert.assertEquals(4, entity.getOtherInfo().size());
+  }
+
+  @Test
+  public void testGetEvents() throws Exception {
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("apptimeline")
+        .path("type_1").path("events")
+        .queryParam("entityId", "id_1")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    ATSEvents events = response.getEntity(ATSEvents.class);
+    Assert.assertNotNull(events);
+    Assert.assertEquals(1, events.getAllEvents().size());
+    ATSEvents.ATSEventsOfOneEntity partEvents = events.getAllEvents().get(0);
+    Assert.assertEquals(2, partEvents.getEvents().size());
+    ATSEvent event1 = partEvents.getEvents().get(0);
+    Assert.assertEquals(456l, event1.getTimestamp());
+    Assert.assertEquals("end_event", event1.getEventType());
+    Assert.assertEquals(1, event1.getEventInfo().size());
+    ATSEvent event2 = partEvents.getEvents().get(1);
+    Assert.assertEquals(123l, event2.getTimestamp());
+    Assert.assertEquals("start_event", event2.getEventType());
+    Assert.assertEquals(0, event2.getEventInfo().size());
+  }
+
+  @Test
+  public void testPostEntities() throws Exception {
+    ATSEntities entities = new ATSEntities();
+    ATSEntity entity = new ATSEntity();
+    entity.setEntityId("test id");
+    entity.setEntityType("test type");
+    entity.setStartTime(System.currentTimeMillis());
+    entities.addEntity(entity);
+    WebResource r = resource();
+    ClientResponse response = r.path("ws").path("v1").path("apptimeline")
+        .accept(MediaType.APPLICATION_JSON)
+        .type(MediaType.APPLICATION_JSON)
+        .post(ClientResponse.class, entities);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    ATSPutErrors errors = response.getEntity(ATSPutErrors.class);
+    Assert.assertNotNull(errors);
+    Assert.assertEquals(0, errors.getErrors().size());
+    // verify the entity exists in the store
+    response = r.path("ws").path("v1").path("apptimeline")
+        .path("test type").path("test id")
+        .accept(MediaType.APPLICATION_JSON)
+        .get(ClientResponse.class);
+    assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+    entity = response.getEntity(ATSEntity.class);
+    Assert.assertNotNull(entity);
+    Assert.assertEquals("test id", entity.getEntityId());
+    Assert.assertEquals("test type", entity.getEntityType());
+  }
+
+}
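The tests above exercise the new apptimeline web service paths: listing entities by type, fetching a single entity, querying its events, and posting new entities. For trying the single-entity GET outside the embedded test container, the following is a minimal Jersey 1.x client sketch; the base URL (host and port) is an assumption for illustration only and is not taken from this patch.

import javax.ws.rs.core.MediaType;

import com.sun.jersey.api.client.Client;
import com.sun.jersey.api.client.ClientResponse;

public class TimelineGetSketch {
  public static void main(String[] args) {
    // Fetch one timeline entity as JSON; the base URL below is assumed,
    // not defined anywhere in this diff.
    Client client = Client.create();
    ClientResponse response = client
        .resource("http://localhost:8188/ws/v1/apptimeline/type_1/id_1")
        .accept(MediaType.APPLICATION_JSON)
        .get(ClientResponse.class);
    System.out.println(response.getStatus() + " "
        + response.getEntity(String.class));
  }
}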

+ 3 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java

@@ -26,6 +26,7 @@ import java.nio.ByteBuffer;
 import java.util.Comparator;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.io.Text;
@@ -312,7 +313,7 @@ public class BuilderUtils {
       String url, long startTime, long finishTime,
       FinalApplicationStatus finalStatus,
       ApplicationResourceUsageReport appResources, String origTrackingUrl,
-      float progress, String appType, Token amRmToken) {
+      float progress, String appType, Token amRmToken, Set<String> tags) {
     ApplicationReport report = recordFactory
         .newRecordInstance(ApplicationReport.class);
     report.setApplicationId(applicationId);
@@ -334,6 +335,7 @@ public class BuilderUtils {
     report.setProgress(progress);
     report.setApplicationType(appType);
     report.setAMRMToken(amRmToken);
+    report.setApplicationTags(tags);
     return report;
   }
   
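The new Set<String> tags parameter is copied onto the report with setApplicationTags(). A minimal sketch of how the value round-trips on an ApplicationReport record, assuming the matching getApplicationTags() accessor that accompanies application tags; the tag strings themselves are made up for illustration.

import java.util.HashSet;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.util.Records;

public class ApplicationTagsSketch {
  public static void main(String[] args) {
    Set<String> tags = new HashSet<String>();
    tags.add("nightly");    // illustrative tag values
    tags.add("analytics");

    // BuilderUtils.newApplicationReport(..., tags) does the equivalent of:
    ApplicationReport report = Records.newRecord(ApplicationReport.class);
    report.setApplicationTags(tags);

    // Assumed read-back accessor paired with setApplicationTags().
    System.out.println("tags on report: " + report.getApplicationTags());
  }
}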

+ 86 - 35
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/AdminService.java

@@ -45,8 +45,11 @@ import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.service.CompositeService;
+import org.apache.hadoop.yarn.LocalConfigurationProvider;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.ResourceOption;
+import org.apache.hadoop.yarn.conf.ConfigurationProvider;
+import org.apache.hadoop.yarn.conf.ConfigurationProviderFactory;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -72,6 +75,7 @@ import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRespo
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
 
 public class AdminService extends CompositeService implements
@@ -89,6 +93,8 @@ public class AdminService extends CompositeService implements
   private InetSocketAddress masterServiceAddress;
   private AccessControlList adminAcl;
   
+  private ConfigurationProvider configurationProvider = null;
+
   private final RecordFactory recordFactory = 
     RecordFactoryProvider.getRecordFactory(null);
 
@@ -109,6 +115,10 @@ public class AdminService extends CompositeService implements
       }
     }
 
+    this.configurationProvider =
+        ConfigurationProviderFactory.getConfigurationProvider(conf);
+    configurationProvider.init(conf);
+
     masterServiceAddress = conf.getSocketAddr(
         YarnConfiguration.RM_ADMIN_ADDRESS,
         YarnConfiguration.DEFAULT_RM_ADMIN_ADDRESS,
@@ -129,6 +139,9 @@ public class AdminService extends CompositeService implements
   @Override
   protected synchronized void serviceStop() throws Exception {
     stopServer();
+    if (this.configurationProvider != null) {
+      configurationProvider.close();
+    }
     super.serviceStop();
   }
 
@@ -295,23 +308,28 @@ public class AdminService extends CompositeService implements
   @Override
   public RefreshQueuesResponse refreshQueues(RefreshQueuesRequest request)
       throws YarnException, StandbyException {
-    UserGroupInformation user = checkAcls("refreshQueues");
+    String argName = "refreshQueues";
+    UserGroupInformation user = checkAcls(argName);
 
     if (!isRMActive()) {
-      RMAuditLogger.logFailure(user.getShortUserName(), "refreshQueues",
+      RMAuditLogger.logFailure(user.getShortUserName(), argName,
           adminAcl.toString(), "AdminService",
           "ResourceManager is not active. Can not refresh queues.");
       throwStandbyException();
     }
 
+    RefreshQueuesResponse response =
+        recordFactory.newRecordInstance(RefreshQueuesResponse.class);
     try {
-      rmContext.getScheduler().reinitialize(getConfig(), this.rmContext);
-      RMAuditLogger.logSuccess(user.getShortUserName(), "refreshQueues", 
+      Configuration conf =
+          getConfiguration(YarnConfiguration.CS_CONFIGURATION_FILE);
+      rmContext.getScheduler().reinitialize(conf, this.rmContext);
+      RMAuditLogger.logSuccess(user.getShortUserName(), argName,
           "AdminService");
-      return recordFactory.newRecordInstance(RefreshQueuesResponse.class);
+      return response;
     } catch (IOException ioe) {
       LOG.info("Exception refreshing queues ", ioe);
-      RMAuditLogger.logFailure(user.getShortUserName(), "refreshQueues",
+      RMAuditLogger.logFailure(user.getShortUserName(), argName,
           adminAcl.toString(), "AdminService",
           "Exception refreshing queues");
       throw RPCUtil.getRemoteException(ioe);
@@ -346,21 +364,22 @@ public class AdminService extends CompositeService implements
   @Override
   public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
       RefreshSuperUserGroupsConfigurationRequest request)
-      throws YarnException, StandbyException {
-    UserGroupInformation user = checkAcls("refreshSuperUserGroupsConfiguration");
+      throws YarnException, IOException {
+    String argName = "refreshSuperUserGroupsConfiguration";
+    UserGroupInformation user = checkAcls(argName);
 
-    // TODO (YARN-1459): Revisit handling super-user-groups on Standby RM
     if (!isRMActive()) {
-      RMAuditLogger.logFailure(user.getShortUserName(),
-          "refreshSuperUserGroupsConfiguration",
+      RMAuditLogger.logFailure(user.getShortUserName(), argName,
           adminAcl.toString(), "AdminService",
           "ResourceManager is not active. Can not refresh super-user-groups.");
       throwStandbyException();
     }
 
-    ProxyUsers.refreshSuperUserGroupsConfiguration(new Configuration());
+    Configuration conf =
+        getConfiguration(YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
+    ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
     RMAuditLogger.logSuccess(user.getShortUserName(),
-        "refreshSuperUserGroupsConfiguration", "AdminService");
+        argName, "AdminService");
     
     return recordFactory.newRecordInstance(
         RefreshSuperUserGroupsConfigurationResponse.class);
@@ -391,14 +410,22 @@ public class AdminService extends CompositeService implements
 
   @Override
   public RefreshAdminAclsResponse refreshAdminAcls(
-      RefreshAdminAclsRequest request) throws YarnException {
-    UserGroupInformation user = checkAcls("refreshAdminAcls");
+      RefreshAdminAclsRequest request) throws YarnException, IOException {
+    String argName = "refreshAdminAcls";
+    UserGroupInformation user = checkAcls(argName);
     
-    Configuration conf = new Configuration();
+    if (!isRMActive()) {
+      RMAuditLogger.logFailure(user.getShortUserName(), argName,
+          adminAcl.toString(), "AdminService",
+          "ResourceManager is not active. Can not refresh user-groups.");
+      throwStandbyException();
+    }
+    Configuration conf =
+        getConfiguration(YarnConfiguration.YARN_SITE_XML_FILE);
     adminAcl = new AccessControlList(conf.get(
         YarnConfiguration.YARN_ADMIN_ACL,
         YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
-    RMAuditLogger.logSuccess(user.getShortUserName(), "refreshAdminAcls", 
+    RMAuditLogger.logSuccess(user.getShortUserName(), argName,
         "AdminService");
 
     return recordFactory.newRecordInstance(RefreshAdminAclsResponse.class);
@@ -406,9 +433,8 @@ public class AdminService extends CompositeService implements
 
   @Override
   public RefreshServiceAclsResponse refreshServiceAcls(
-      RefreshServiceAclsRequest request) throws YarnException {
-    Configuration conf = new Configuration();
-    if (!conf.getBoolean(
+      RefreshServiceAclsRequest request) throws YarnException, IOException {
+    if (!getConfig().getBoolean(
              CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
              false)) {
       throw RPCUtil.getRemoteException(
@@ -416,27 +442,38 @@ public class AdminService extends CompositeService implements
               CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION + 
               ") not enabled."));
     }
-    
+
+    String argName = "refreshServiceAcls";
+    if (!isRMActive()) {
+      RMAuditLogger.logFailure(UserGroupInformation.getCurrentUser()
+          .getShortUserName(), argName,
+          adminAcl.toString(), "AdminService",
+          "ResourceManager is not active. Can not refresh Service ACLs.");
+      throwStandbyException();
+    }
+
     PolicyProvider policyProvider = new RMPolicyProvider(); 
-    
+    Configuration conf =
+        getConfiguration(YarnConfiguration.HADOOP_POLICY_CONFIGURATION_FILE);
+
     refreshServiceAcls(conf, policyProvider);
-    if (isRMActive()) {
-      rmContext.getClientRMService().refreshServiceAcls(conf, policyProvider);
-      rmContext.getApplicationMasterService().refreshServiceAcls(
-          conf, policyProvider);
-      rmContext.getResourceTrackerService().refreshServiceAcls(
-          conf, policyProvider);
-    } else {
-      LOG.warn("ResourceManager is not active. Not refreshing ACLs for " +
-          "Clients, ApplicationMasters and NodeManagers");
-    }
+    rmContext.getClientRMService().refreshServiceAcls(conf, policyProvider);
+    rmContext.getApplicationMasterService().refreshServiceAcls(
+        conf, policyProvider);
+    rmContext.getResourceTrackerService().refreshServiceAcls(
+        conf, policyProvider);
     
     return recordFactory.newRecordInstance(RefreshServiceAclsResponse.class);
   }
 
-  void refreshServiceAcls(Configuration configuration, 
+  synchronized void refreshServiceAcls(Configuration configuration,
       PolicyProvider policyProvider) {
-    this.server.refreshServiceAcl(configuration, policyProvider);
+    if (this.configurationProvider instanceof LocalConfigurationProvider) {
+      this.server.refreshServiceAcl(configuration, policyProvider);
+    } else {
+      this.server.refreshServiceAclWithConfigration(configuration,
+          policyProvider);
+    }
   }
 
   @Override
@@ -483,5 +520,19 @@ public class AdminService extends CompositeService implements
           UpdateNodeResourceResponse.class);
       return response;
   }
-  
+
+  private synchronized Configuration getConfiguration(String confFileName)
+      throws YarnException, IOException {
+    return this.configurationProvider.getConfiguration(confFileName);
+  }
+
+  @VisibleForTesting
+  public AccessControlList getAccessControlList() {
+    return this.adminAcl;
+  }
+
+  @VisibleForTesting
+  public Server getServer() {
+    return this.server;
+  }
 }
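The refresh* methods above now obtain their Configuration from the pluggable ConfigurationProvider rather than constructing new Configuration() inline. Below is a minimal sketch of the provider lifecycle those methods depend on (factory lookup, init, getConfiguration by well-known file name, close), using only the classes and constants already referenced in this diff; with no provider class configured, the factory is expected to return the LocalConfigurationProvider default.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.LocalConfigurationProvider;
import org.apache.hadoop.yarn.conf.ConfigurationProvider;
import org.apache.hadoop.yarn.conf.ConfigurationProviderFactory;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class ConfigurationProviderSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();

    // Same lifecycle AdminService.serviceInit()/serviceStop() uses.
    ConfigurationProvider provider =
        ConfigurationProviderFactory.getConfigurationProvider(conf);
    provider.init(conf);
    try {
      // e.g. the file refreshSuperUserGroupsConfiguration() reloads.
      Configuration coreSite = provider.getConfiguration(
          YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
      System.out.println("provider = " + provider.getClass().getSimpleName()
          + ", local? " + (provider instanceof LocalConfigurationProvider)
          + ", keys loaded = " + coreSite.size());
    } finally {
      provider.close();
    }
  }
}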

+ 24 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ApplicationMasterService.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.LocalConfigurationProvider;
 import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
@@ -86,6 +87,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+
 @SuppressWarnings("unchecked")
 @Private
 public class ApplicationMasterService extends AbstractService implements
@@ -102,6 +105,7 @@ public class ApplicationMasterService extends AbstractService implements
   private final AllocateResponse resync =
       recordFactory.newRecordInstance(AllocateResponse.class);
   private final RMContext rmContext;
+  private boolean useLocalConfigurationProvider;
 
   public ApplicationMasterService(RMContext rmContext, YarnScheduler scheduler) {
     super(ApplicationMasterService.class.getName());
@@ -111,6 +115,15 @@ public class ApplicationMasterService extends AbstractService implements
     this.rmContext = rmContext;
   }
 
+  @Override
+  protected void serviceInit(Configuration conf) throws Exception {
+    this.useLocalConfigurationProvider =
+        (LocalConfigurationProvider.class.isAssignableFrom(conf.getClass(
+            YarnConfiguration.RM_CONFIGURATION_PROVIDER_CLASS,
+            LocalConfigurationProvider.class)));
+    super.serviceInit(conf);
+  }
+
   @Override
   protected void serviceStart() throws Exception {
     Configuration conf = getConfig();
@@ -578,7 +591,12 @@ public class ApplicationMasterService extends AbstractService implements
 
   public void refreshServiceAcls(Configuration configuration, 
       PolicyProvider policyProvider) {
-    this.server.refreshServiceAcl(configuration, policyProvider);
+    if (this.useLocalConfigurationProvider) {
+      this.server.refreshServiceAcl(configuration, policyProvider);
+    } else {
+      this.server.refreshServiceAclWithConfigration(configuration,
+          policyProvider);
+    }
   }
   
   @Override
@@ -604,4 +622,9 @@ public class ApplicationMasterService extends AbstractService implements
       this.response = response;
     }
   }
+
+  @VisibleForTesting
+  public Server getServer() {
+    return this.server;
+  }
 }

Some files were not shown because too many files changed in this diff