
Merge branch 'trunk' into HDFS-7240

Anu Engineer committed 7 years ago
commit 91975886e3
100 changed files with 1660 additions and 461 deletions
  1. +3 -0    hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
  2. +0 -0    hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  3. +0 -0    hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
  4. +0 -0    hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java
  5. +1 -1    hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java
  6. +1 -2    hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java
  7. +2 -2    hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java
  8. +2 -2    hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
  9. +3 -3    hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
  10. +1 -1   hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java
  11. +1 -1   hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java
  12. +15 -2  hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html
  13. +4 -1   hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js
  14. +5 -1   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  15. +55 -7  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
  16. +50 -15 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
  17. +23 -7  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java
  18. +4 -5   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
  19. +4 -5   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
  20. +2 -2   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java
  21. +33 -0  hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  22. +5 -0   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java
  23. +200 -0 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java
  24. +104 -3 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java
  25. +79 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
  26. +6 -1   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
  27. +6 -4   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java
  28. +0 -0   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
  29. +0 -0   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
  30. +0 -0   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
  31. +1 -1   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
  32. +2 -2   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
  33. +2 -1   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java
  34. +1 -1   hadoop-project/pom.xml
  35. +1 -0   hadoop-project/src/site/site.xml
  36. +30 -0  hadoop-tools/hadoop-aliyun/src/site/resources/css/site.css
  37. +30 -0  hadoop-tools/hadoop-aws/src/site/resources/css/site.css
  38. +30 -0  hadoop-tools/hadoop-azure-datalake/src/site/resources/css/site.css
  39. +30 -0  hadoop-tools/hadoop-azure/src/site/resources/css/site.css
  40. +1 -0   hadoop-tools/hadoop-sls/pom.xml
  41. +25 -10 hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java
  42. +19 -5  hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java
  43. +25 -0  hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java
  44. +19 -0  hadoop-tools/hadoop-sls/src/test/resources/nodes-with-resources.json
  45. +1 -1   hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql
  46. +41 -4  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java
  47. +1 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto
  48. +16 -6  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java
  49. +6 -2   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml
  50. +44 -1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java
  51. +7 -5   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java
  52. +16 -0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile
  53. +0 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile
  54. +0 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile
  55. +0 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json
  56. +0 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile
  57. +0 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile
  58. +5 -2   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java
  59. +10 -8  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java
  60. +3 -2   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
  61. +1 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java
  62. +24 -0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java
  63. +3 -2   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java
  64. +33 -23 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
  65. +13 -3  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java
  66. +20 -0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
  67. +27 -2  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java
  68. +6 -1   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java
  69. +99 -0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/DefaultProbe.java
  70. +21 -16 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java
  71. +12 -0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java
  72. +7 -7   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java
  73. +12 -12 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java
  74. +13 -5  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java
  75. +11 -6  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java
  76. +1 -16  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java
  77. +21 -0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java
  78. +60 -0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java
  79. +25 -0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java
  80. +50 -0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java
  81. +0 -156 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestSystemServiceManager.java
  82. +155 -0 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/probe/TestDefaultProbe.java
  83. +1 -1   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java
  84. +4 -4   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java
  85. +4 -4   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java
  86. +21 -21 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
  87. +11 -0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java
  88. +11 -6  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
  89. +1 -1   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java
  90. +1 -1   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java
  91. +2 -1   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java
  92. +3 -4   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java
  93. +4 -1   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java
  94. +1 -1   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java
  95. +6 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java
  96. +0 -30  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java
  97. +0 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
  98. +0 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java
  99. +0 -19  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
  100. +1 -1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java

+ 3 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java

@@ -525,6 +525,9 @@ public class GenericOptionsParser {
     }
     List<String> newArgs = new ArrayList<String>(args.length);
     for (int i=0; i < args.length; i++) {
+      if (args[i] == null) {
+        continue;
+      }
       String prop = null;
       if (args[i].equals("-D")) {
         newArgs.add(args[i]);
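
The added guard lets GenericOptionsParser tolerate argument arrays whose consumed slots were nulled out in place rather than compacted. A minimal, hypothetical sketch of the behavior (the wrapper scenario and property name are illustrative, not part of the commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.GenericOptionsParser;

public class NullArgDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // A wrapper that already consumed some options may leave null slots
    // behind; with this patch the parser simply skips them.
    String[] argv = {null, "-D", "my.key=value", null, "in", "out"};
    GenericOptionsParser parser = new GenericOptionsParser(conf, argv);
    System.out.println(conf.get("my.key"));   // prints: value
    for (String rest : parser.getRemainingArgs()) {
      System.out.println(rest);               // remaining application args
    }
  }
}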

+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java


+ 0 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/TestDFSPacket.java


+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/FederationMetrics.java

@@ -456,7 +456,7 @@ public class FederationMetrics implements FederationMBean {
         dev = (float) Math.sqrt(dev / usages.length);
       }
     } catch (IOException e) {
-      LOG.info("Cannot get the live nodes: {}", e.getMessage());
+      LOG.error("Cannot get the live nodes: {}", e.getMessage());
     }

     final Map<String, Object> innerInfo = new HashMap<>();

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/LocalResolver.java

@@ -210,8 +210,7 @@ public class LocalResolver extends RouterResolver<String, String> {
         }
       }
     } catch (IOException ioe) {
-      LOG.error("Cannot get Namenodes from the State Store: {}",
-          ioe.getMessage());
+      LOG.error("Cannot get Namenodes from the State Store", ioe);
     }
     return ret;
   }
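
Several of the logging hunks in this merge share one idiom: the caught exception is passed as the final argument to an SLF4J call, which records the full stack trace, whereas logging only e.getMessage() discards it. A minimal sketch of the difference (class and message are illustrative):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jThrowableDemo {
  private static final Logger LOG =
      LoggerFactory.getLogger(Slf4jThrowableDemo.class);

  static void report(Exception e) {
    // Message only: the stack trace is lost.
    LOG.error("Cannot get Namenodes from the State Store: {}", e.getMessage());
    // Throwable as the last argument: SLF4J appends the full stack trace
    // even though it has no matching {} placeholder.
    LOG.error("Cannot get Namenodes from the State Store", e);
  }
}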

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterHeartbeatService.java

@@ -100,7 +100,7 @@ public class RouterHeartbeatService extends PeriodicService {
           LOG.debug("Router heartbeat for router {}", routerId);
         }
       } catch (IOException e) {
-        LOG.error("Cannot heartbeat router {}: {}", routerId, e.getMessage());
+        LOG.error("Cannot heartbeat router {}", routerId, e);
       }
     } else {
       LOG.warn("Cannot heartbeat router {}: State Store unavailable", routerId);
@@ -132,7 +132,7 @@ public class RouterHeartbeatService extends PeriodicService {
         }
       }
     } catch (Exception e) {
-      LOG.error("Cannot get version for {}: {}", clazz, e.getMessage());
+      LOG.error("Cannot get version for {}", clazz, e);
     }
     return version;
   }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java

@@ -1067,8 +1067,8 @@ public class RouterRpcClient {
           results.put(location, clazz.cast(result));
         } catch (CancellationException ce) {
           T loc = orderedLocations.get(i);
-          String msg =
-              "Invocation to \"" + loc + "\" for \"" + method + "\" timed out";
+          String msg = "Invocation to \"" + loc + "\" for \""
+              + method.getMethodName() + "\" timed out";
           LOG.error(msg);
           IOException ioe = new SubClusterTimeoutException(msg);
           exceptions.put(location, ioe);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java

@@ -2293,7 +2293,7 @@ public class RouterRpcServer extends AbstractService
           return entry.isAll();
         }
       } catch (IOException e) {
-        LOG.error("Cannot get mount point: {}", e.getMessage());
+        LOG.error("Cannot get mount point", e);
       }
     }
     return false;
@@ -2314,7 +2314,7 @@ public class RouterRpcServer extends AbstractService
           return true;
         }
       } catch (IOException e) {
-        LOG.error("Cannot get mount point: {}", e.getMessage());
+        LOG.error("Cannot get mount point", e);
       }
     }
     return false;
@@ -2343,7 +2343,7 @@ public class RouterRpcServer extends AbstractService
           ret.put(child, entry.getDateModified());
         }
       } catch (IOException e) {
-        LOG.error("Cannot get mount point: {}", e.getMessage());
+        LOG.error("Cannot get mount point", e);
       }
     }
     return ret;

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreService.java

@@ -183,7 +183,7 @@ public class StateStoreService extends CompositeService {
     } catch (NotCompliantMBeanException e) {
       throw new RuntimeException("Bad StateStoreMBean setup", e);
     } catch (MetricsException e) {
-      LOG.info("Failed to register State Store bean {}", e.getMessage());
+      LOG.error("Failed to register State Store bean {}", e.getMessage());
     }

     super.serviceInit(this.conf);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreFileBaseImpl.java

@@ -361,7 +361,7 @@ public abstract class StateStoreFileBaseImpl
           try {
             writer.close();
           } catch (IOException e) {
-            LOG.error("Cannot close the writer for {}", recordPathTemp);
+            LOG.error("Cannot close the writer for {}", recordPathTemp, e);
           }
         }
       }

+ 15 - 2
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.html

@@ -90,6 +90,21 @@
 
 
 <div class="page-header"><h1>Summary</h1></div>
 {#federation}
+<p>
+  Security is {#routerstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/routerstat}.</p>
+<p>{#router}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/router}</p>
+
+<p>
+  {NumFiles|fmt_number} files and directories, {NumBlocks|fmt_number} blocks.
+</p>
+
+{#mem.HeapMemoryUsage}
+<p>Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Heap Memory. Max Heap Memory is {@eq key=max value="-1" type="number"}&ltunbounded&gt{:else}{max|fmt_bytes}{/eq}.</p>
+{/mem.HeapMemoryUsage}
+
+{#mem.NonHeapMemoryUsage}
+<p>Non Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Commited Non Heap Memory. Max Non Heap Memory is {@eq key=max value="-1" type="number"}&ltunbounded&gt{:else}{max|fmt_bytes}{/eq}.</p>
+{/mem.NonHeapMemoryUsage}
 <table class="table table-bordered table-striped">
   <tr><th>Total capacity</th><td>{TotalCapacity|fmt_bytes}</td></tr>
   <tr><th>Used capacity</th><td>{UsedCapacity|fmt_bytes}</td></tr>
@@ -103,8 +118,6 @@
   <tr><th><a href="#tab-datanode">Live Nodes</a></th><td>{NumLiveNodes} (Decommissioned: {NumDecomLiveNodes})</td></tr>
   <tr><th><a href="#tab-datanode">Dead Nodes</a></th><td>{NumDeadNodes} (Decommissioned: {NumDecomDeadNodes})</td></tr>
   <tr><th><a href="#tab-datanode">Decommissioning Nodes</a></th><td>{NumDecommissioningNodes}</td></tr>
-  <tr><th>Files</th><td>{NumFiles}</td></tr>
-  <tr><th>Blocks</th><td>{NumBlocks}</td></tr>
   <tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{NumOfBlocksUnderReplicated}</td></tr>
   <tr><th>Number of Blocks Pending Deletion</th><td>{NumOfBlocksPendingDeletion}</td></tr>
 </table>

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/webapps/router/federationhealth.js

@@ -33,7 +33,10 @@
 
 
   function load_overview() {
     var BEANS = [
-      {"name": "federation",      "url": "/jmx?qry=Hadoop:service=Router,name=FederationState"}
+      {"name": "federation",  "url": "/jmx?qry=Hadoop:service=Router,name=FederationState"},
+      {"name": "routerstat",  "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"},
+      {"name": "router",      "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"},
+      {"name": "mem",         "url": "/jmx?qry=java.lang:type=Memory"}
     ];

     var HELPERS = {
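
Each BEANS entry is a query against the standard Hadoop /jmx servlet, and the new beans feed the Summary block added to federationhealth.html above. A hypothetical way to inspect what one of these queries returns (the Router host and port are assumptions for illustration):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;

public class JmxPeek {
  public static void main(String[] args) throws Exception {
    // Assumed Router web address; substitute your own deployment's.
    URL url = new URL("http://localhost:50071/jmx?qry=java.lang:type=Memory");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(url.openStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // JSON containing HeapMemoryUsage, etc.
      }
    }
  }
}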

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -977,15 +977,19 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_JOURNALNODE_EDITS_DIR_DEFAULT = "/tmp/hadoop/dfs/journalnode/";
   public static final String  DFS_JOURNALNODE_RPC_ADDRESS_KEY = "dfs.journalnode.rpc-address";
   public static final int     DFS_JOURNALNODE_RPC_PORT_DEFAULT = 8485;
+  public static final String  DFS_JOURNALNODE_RPC_BIND_HOST_KEY = "dfs.journalnode.rpc-bind-host";
   public static final String  DFS_JOURNALNODE_RPC_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_RPC_PORT_DEFAULT;
-    
+
   public static final String  DFS_JOURNALNODE_HTTP_ADDRESS_KEY = "dfs.journalnode.http-address";
   public static final int     DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480;
+  public static final String  DFS_JOURNALNODE_HTTP_BIND_HOST_KEY = "dfs.journalnode.http-bind-host";
   public static final String  DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_HTTP_PORT_DEFAULT;
   public static final String  DFS_JOURNALNODE_HTTPS_ADDRESS_KEY = "dfs.journalnode.https-address";
   public static final int     DFS_JOURNALNODE_HTTPS_PORT_DEFAULT = 8481;
+  public static final String  DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY = "dfs.journalnode.https-bind-host";
   public static final String  DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_JOURNALNODE_HTTPS_PORT_DEFAULT;

+
   public static final String  DFS_JOURNALNODE_KEYTAB_FILE_KEY = "dfs.journalnode.keytab.file";
   public static final String  DFS_JOURNALNODE_KERBEROS_PRINCIPAL_KEY = "dfs.journalnode.kerberos.principal";
   public static final String  DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY = "dfs.journalnode.kerberos.internal.spnego.principal";

+ 55 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java

@@ -36,9 +36,12 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.DiskChecker;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTP_BIND_HOST_KEY;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
@@ -226,7 +229,8 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {

       registerJNMXBean();

-      httpServer = new JournalNodeHttpServer(conf, this);
+      httpServer = new JournalNodeHttpServer(conf, this,
+          getHttpServerBindAddress(conf));
       httpServer.start();

       httpServerURI = httpServer.getServerURI().toString();
@@ -251,11 +255,6 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   public InetSocketAddress getBoundIpcAddress() {
     return rpcServer.getAddress();
   }
-
-  @Deprecated
-  public InetSocketAddress getBoundHttpAddress() {
-    return httpServer.getAddress();
-  }

   public String getHttpServerURI() {
     return httpServerURI;
@@ -400,7 +399,7 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   private void registerJNMXBean() {
     journalNodeInfoBeanName = MBeans.register("JournalNode", "JournalNodeInfo", this);
   }
-
+
   private class ErrorReporter implements StorageErrorReporter {
     @Override
     public void reportErrorOnFile(File f) {
@@ -464,4 +463,53 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
     return journalsById.get(jid);
   }

+  public static InetSocketAddress getHttpAddress(Configuration conf) {
+    String addr = conf.get(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
+        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT);
+    return NetUtils.createSocketAddr(addr,
+        DFSConfigKeys.DFS_JOURNALNODE_HTTP_PORT_DEFAULT,
+        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY);
+  }
+
+  protected InetSocketAddress getHttpServerBindAddress(
+      Configuration configuration) {
+    InetSocketAddress bindAddress = getHttpAddress(configuration);
+
+    // If DFS_JOURNALNODE_HTTP_BIND_HOST_KEY exists then it overrides the
+    // host name portion of DFS_JOURNALNODE_HTTP_ADDRESS_KEY.
+    final String bindHost = configuration.getTrimmed(
+        DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
+    if (bindHost != null && !bindHost.isEmpty()) {
+      bindAddress = new InetSocketAddress(bindHost, bindAddress.getPort());
+    }
+
+    return bindAddress;
+  }
+
+  @VisibleForTesting
+  public JournalNodeRpcServer getRpcServer() {
+    return rpcServer;
+  }
+
+
+  /**
+   * @return the actual JournalNode HTTP/HTTPS address.
+   */
+  public InetSocketAddress getBoundHttpAddress() {
+    return httpServer.getAddress();
+  }
+
+  /**
+   * @return JournalNode HTTP address
+   */
+  public InetSocketAddress getHttpAddress() {
+    return httpServer.getHttpAddress();
+  }
+
+  /**
+   * @return JournalNode HTTPS address
+   */
+  public InetSocketAddress getHttpsAddress() {
+    return httpServer.getHttpsAddress();
+  }
 }
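
The precedence implemented by getHttpServerBindAddress is: the optional bind-host key replaces only the host part of the corresponding address key, while the port always comes from the address key. A standalone sketch of the same resolution, assuming the two configuration keys added above (the demo class itself is illustrative):

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;

public class BindHostResolutionDemo {
  static InetSocketAddress resolveHttpBind(Configuration conf) {
    InetSocketAddress addr = NetUtils.createSocketAddr(
        conf.get("dfs.journalnode.http-address", "0.0.0.0:8480"));
    String bindHost = conf.getTrimmed("dfs.journalnode.http-bind-host");
    if (bindHost != null && !bindHost.isEmpty()) {
      // Keep the configured port; override only the host.
      addr = new InetSocketAddress(bindHost, addr.getPort());
    }
    return addr;
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("dfs.journalnode.http-address", "jn1.example.com:8480");
    conf.set("dfs.journalnode.http-bind-host", "0.0.0.0");
    System.out.println(resolveHttpBind(conf)); // /0.0.0.0:8480
  }
}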

+ 50 - 15
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.net.NetUtils;

@@ -41,21 +42,37 @@ public class JournalNodeHttpServer {
   private HttpServer2 httpServer;
   private final JournalNode localJournalNode;

+  private InetSocketAddress httpAddress;
+  private InetSocketAddress httpsAddress;
+  private final InetSocketAddress bindAddress;
+
   private final Configuration conf;

-  JournalNodeHttpServer(Configuration conf, JournalNode jn) {
+  JournalNodeHttpServer(Configuration conf, JournalNode jn,
+      InetSocketAddress bindAddress) {
     this.conf = conf;
     this.localJournalNode = jn;
+    this.bindAddress = bindAddress;
   }

   void start() throws IOException {
-    final InetSocketAddress httpAddr = getAddress(conf);
+    final InetSocketAddress httpAddr = bindAddress;

     final String httpsAddrString = conf.get(
         DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
         DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_DEFAULT);
     InetSocketAddress httpsAddr = NetUtils.createSocketAddr(httpsAddrString);

+    if (httpsAddr != null) {
+      // If DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY exists then it overrides the
+      // host name portion of DFS_NAMENODE_HTTPS_ADDRESS_KEY.
+      final String bindHost =
+          conf.getTrimmed(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
+      if (bindHost != null && !bindHost.isEmpty()) {
+        httpsAddr = new InetSocketAddress(bindHost, httpsAddr.getPort());
+      }
+    }
+
     HttpServer2.Builder builder = DFSUtil.httpServerTemplateForNNAndJN(conf,
         httpAddr, httpsAddr, "journal",
         DFSConfigKeys.DFS_JOURNALNODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
@@ -67,6 +84,20 @@ public class JournalNodeHttpServer {
     httpServer.addInternalServlet("getJournal", "/getJournal",
         GetJournalEditServlet.class, true);
     httpServer.start();
+
+    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
+    int connIdx = 0;
+    if (policy.isHttpEnabled()) {
+      httpAddress = httpServer.getConnectorAddress(connIdx++);
+      conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpAddress));
+    }
+
+    if (policy.isHttpsEnabled()) {
+      httpsAddress = httpServer.getConnectorAddress(connIdx);
+      conf.set(DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY,
+          NetUtils.getHostPortString(httpsAddress));
+    }
   }

   void stop() throws IOException {
@@ -78,15 +109,27 @@
       }
     }
   }
+
+  /**
+   * Return the actual HTTP/HTTPS address bound to by the running server.
+   */
+  public InetSocketAddress getAddress() {
+    assert httpAddress != null || httpsAddress != null;
+    return httpAddress != null ? httpAddress : httpsAddress;
+  }

   /**
    * Return the actual address bound to by the running server.
    */
-  @Deprecated
-  public InetSocketAddress getAddress() {
-    InetSocketAddress addr = httpServer.getConnectorAddress(0);
-    assert addr.getPort() != 0;
-    return addr;
+  public InetSocketAddress getHttpAddress() {
+    return httpAddress;
+  }
+
+  /**
+   * Return the actual address bound to by the running server.
+   */
+  public InetSocketAddress getHttpsAddress() {
+    return httpsAddress;
   }

   /**
@@ -101,14 +144,6 @@
         + NetUtils.getHostPortString(addr));
   }

-  private static InetSocketAddress getAddress(Configuration conf) {
-    String addr = conf.get(DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY,
-        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT);
-    return NetUtils.createSocketAddr(addr,
-        DFSConfigKeys.DFS_JOURNALNODE_HTTP_PORT_DEFAULT,
-        DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY);
-  }
-
   public static Journal getJournalFromContext(ServletContext context, String jid)
       throws IOException {
     JournalNode jn = (JournalNode)context.getAttribute(JN_ATTRIBUTE_KEY);

+ 23 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.qjournal.server;
 
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
+import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -53,11 +54,14 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URL;

+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_RPC_BIND_HOST_KEY;
+

 @InterfaceAudience.Private
 @VisibleForTesting
 public class JournalNodeRpcServer implements QJournalProtocol,
     InterQJournalProtocol {
+  private static final Log LOG = JournalNode.LOG;
   private static final int HANDLER_COUNT = 5;
   private final JournalNode jn;
   private Server server;
@@ -73,6 +77,12 @@ public class JournalNodeRpcServer implements QJournalProtocol,
         true);

     InetSocketAddress addr = getAddress(confCopy);
+    String bindHost = conf.getTrimmed(DFS_JOURNALNODE_RPC_BIND_HOST_KEY, null);
+    if (bindHost == null) {
+      bindHost = addr.getHostName();
+    }
+    LOG.info("RPC server is binding to " + bindHost + ":" + addr.getPort());
+
     RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
         ProtobufRpcEngine.class);
     QJournalProtocolServerSideTranslatorPB translator =
@@ -81,13 +91,13 @@
         .newReflectiveBlockingService(translator);

     this.server = new RPC.Builder(confCopy)
-      .setProtocol(QJournalProtocolPB.class)
-      .setInstance(service)
-      .setBindAddress(addr.getHostName())
-      .setPort(addr.getPort())
-      .setNumHandlers(HANDLER_COUNT)
-      .setVerbose(false)
-      .build();
+        .setProtocol(QJournalProtocolPB.class)
+        .setInstance(service)
+        .setBindAddress(bindHost)
+        .setPort(addr.getPort())
+        .setNumHandlers(HANDLER_COUNT)
+        .setVerbose(false)
+        .build();


     //Adding InterQJournalProtocolPB to server
@@ -298,4 +308,10 @@
         .setFromURL(jn.getHttpServerURI())
         .build();
   }
+
+  /** Allow access to the RPC server for testing. */
+  @VisibleForTesting
+  Server getRpcServer() {
+    return server;
+  }
 }

+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -970,7 +970,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    * @return newReplicaInfo
    * @throws IOException
    */
-  private ReplicaInfo moveBlock(ExtendedBlock block, ReplicaInfo replicaInfo,
+  @VisibleForTesting
+  ReplicaInfo moveBlock(ExtendedBlock block, ReplicaInfo replicaInfo,
       FsVolumeReference volumeRef) throws IOException {
     ReplicaInfo newReplicaInfo = copyReplicaToVolume(block, replicaInfo,
         volumeRef);
@@ -2302,10 +2303,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
    * the disk, update {@link ReplicaInfo} with the correct file</li>
    * </ul>
    *
-   * @param blockId Block that differs
-   * @param diskFile Block file on the disk
-   * @param diskMetaFile Metadata file from on the disk
-   * @param vol Volume of the block file
+   * @param bpid block pool ID
+   * @param scanInfo {@link ScanInfo} for a given block
    */
   @Override
   public void checkAndUpdate(String bpid, ScanInfo scanInfo)

+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java

@@ -231,12 +231,11 @@ class FsVolumeList {
   }

   /**
-   * Calls {@link FsVolumeImpl#checkDirs()} on each volume.
-   * 
-   * Use {@link checkDirsLock} to allow only one instance of checkDirs() call.
+   * Updates the failed volume info in the volumeFailureInfos Map
+   * and calls {@link #removeVolume(FsVolumeImpl)} to remove the volume
+   * from the volume list for each of the failed volumes.
    *
-   * @return list of all the failed volumes.
-   * @param failedVolumes
+   * @param failedVolumes set of volumes marked failed.
    */
   void handleVolumeFailures(Set<FsVolumeSpi> failedVolumes) {
     try (AutoCloseableLock lock = checkDirsLock.acquire()) {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java

@@ -385,8 +385,8 @@ class OfflineImageReconstructor {
           break;
         case XMLEvent.CHARACTERS:
           String val = XMLUtils.
-              unmangleXmlString(ev.asCharacters().getData(), true);
-          parent.setVal(val);
+              unmangleXmlString(ev.asCharacters().getData(), false);
+          parent.setVal(parent.getVal() + val);
           events.nextEvent();
           break;
         case XMLEvent.ATTRIBUTE:
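
This change reflects a StAX subtlety: the parser may deliver one logical text run as several consecutive CHARACTERS events (for example around entity references), so each event's data must be appended to the value accumulated so far rather than overwriting it. A self-contained sketch of the pitfall, with illustrative sample XML:

import java.io.StringReader;
import javax.xml.stream.XMLEventReader;
import javax.xml.stream.XMLInputFactory;
import javax.xml.stream.events.XMLEvent;

public class StaxCharactersDemo {
  public static void main(String[] args) throws Exception {
    // The entity reference can split the text into multiple CHARACTERS events.
    String xml = "<name>a&amp;b</name>";
    XMLEventReader reader = XMLInputFactory.newFactory()
        .createXMLEventReader(new StringReader(xml));
    StringBuilder val = new StringBuilder();
    while (reader.hasNext()) {
      XMLEvent ev = reader.nextEvent();
      if (ev.isCharacters()) {
        val.append(ev.asCharacters().getData()); // append, don't overwrite
      }
    }
    System.out.println(val); // a&b
  }
}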

+ 33 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -2167,6 +2167,17 @@
   </description>
 </property>

+<property>
+  <name>dfs.journalnode.rpc-bind-host</name>
+  <value></value>
+  <description>
+    The actual address the RPC server will bind to. If this optional address is
+    set, it overrides only the hostname portion of dfs.journalnode.rpc-address.
+    This is useful for making the JournalNode listen on all interfaces by
+    setting it to 0.0.0.0.
+  </description>
+</property>
+
 <property>
   <name>dfs.journalnode.http-address</name>
   <value>0.0.0.0:8480</value>
@@ -2176,6 +2187,17 @@
   </description>
 </property>

+<property>
+  <name>dfs.journalnode.http-bind-host</name>
+  <value></value>
+  <description>
+    The actual address the HTTP server will bind to. If this optional address
+    is set, it overrides only the hostname portion of
+    dfs.journalnode.http-address. This is useful for making the JournalNode
+    HTTP server listen on all interfaces by setting it to 0.0.0.0.
+  </description>
+</property>
+
 <property>
   <name>dfs.journalnode.https-address</name>
   <value>0.0.0.0:8481</value>
@@ -2185,6 +2207,17 @@
   </description>
 </property>

+<property>
+  <name>dfs.journalnode.https-bind-host</name>
+  <value></value>
+  <description>
+    The actual address the HTTPS server will bind to. If this optional address
+    is set, it overrides only the hostname portion of
+    dfs.journalnode.https-address. This is useful for making the JournalNode
+    HTTPS server listen on all interfaces by setting it to 0.0.0.0.
+  </description>
+</property>
+
 <property>
   <name>dfs.namenode.audit.loggers</name>
   <value>default</value>
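
These are the three keys that TestJournalNodeRespectsBindHostKeys (added below) exercises, and tests set them programmatically the same way an hdfs-site.xml would: the advertised address stays host-specific while the listeners bind the wildcard. A short sketch of that combination (key strings as defined above; hostnames illustrative):

import org.apache.hadoop.conf.Configuration;

public class JnBindHostConfDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Advertised address stays host-specific for clients ...
    conf.set("dfs.journalnode.rpc-address", "jn1.example.com:8485");
    // ... while the listeners bind all interfaces.
    conf.set("dfs.journalnode.rpc-bind-host", "0.0.0.0");
    conf.set("dfs.journalnode.http-bind-host", "0.0.0.0");
    conf.set("dfs.journalnode.https-bind-host", "0.0.0.0");
    System.out.println(conf.get("dfs.journalnode.rpc-bind-host")); // 0.0.0.0
  }
}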

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/client/impl/BlockReaderTestUtil.java

@@ -87,6 +87,11 @@ public class BlockReaderTestUtil {
     this(replicationFactor, new HdfsConfiguration());
   }

+  public BlockReaderTestUtil(MiniDFSCluster cluster, HdfsConfiguration conf) {
+    this.conf = conf;
+    this.cluster = cluster;
+  }
+
   public BlockReaderTestUtil(int replicationFactor, HdfsConfiguration config) throws Exception {
     this.conf = config;
     conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, replicationFactor);
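
The new two-argument constructor lets a test wrap an already-running MiniDFSCluster instead of having BlockReaderTestUtil create its own. A hypothetical usage sketch (the cluster setup and the getFileSystem() call are illustrative assumptions):

import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;

public class ExistingClusterDemo {
  public static void main(String[] args) throws Exception {
    HdfsConfiguration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      // Reuse the caller's cluster rather than spinning up a new one.
      BlockReaderTestUtil util = new BlockReaderTestUtil(cluster, conf);
      System.out.println(util.getFileSystem().getUri());
    } finally {
      cluster.shutdown();
    }
  }
}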

+ 200 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeRespectsBindHostKeys.java

@@ -0,0 +1,200 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.qjournal.server;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTP_POLICY_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTP_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_JOURNALNODE_RPC_BIND_HOST_KEY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertThat;
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.IsNot.not;
+
+import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.http.HttpConfig;
+import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+
+/**
+ * This test checks that the JournalNode respects the following keys.
+ *
+ *  - DFS_JOURNALNODE_RPC_BIND_HOST_KEY
+ *  - DFS_JOURNALNODE_HTTP_BIND_HOST_KEY
+ *  - DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY
+ */
+public class TestJournalNodeRespectsBindHostKeys {
+
+  public static final Log LOG = LogFactory.getLog(
+      TestJournalNodeRespectsBindHostKeys.class);
+  private static final String WILDCARD_ADDRESS = "0.0.0.0";
+  private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1:0";
+  private static final int NUM_JN = 1;
+
+  private HdfsConfiguration conf;
+  private MiniJournalCluster jCluster;
+  private JournalNode jn;
+
+  @Before
+  public void setUp() {
+    conf = new HdfsConfiguration();
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    if (jCluster != null) {
+      jCluster.shutdown();
+      jCluster = null;
+    }
+  }
+
+  private static String getRpcServerAddress(JournalNode jn) {
+    JournalNodeRpcServer rpcServer = jn.getRpcServer();
+    return rpcServer.getRpcServer().getListenerAddress().getAddress().
+        toString();
+  }
+
+  @Test (timeout=300000)
+  public void testRpcBindHostKey() throws IOException {
+    LOG.info("Testing without " + DFS_JOURNALNODE_RPC_BIND_HOST_KEY);
+
+    // NN should not bind the wildcard address by default.
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    String address = getRpcServerAddress(jn);
+    assertThat("Bind address not expected to be wildcard by default.",
+        address, not("/" + WILDCARD_ADDRESS));
+
+    LOG.info("Testing with " + DFS_JOURNALNODE_RPC_BIND_HOST_KEY);
+
+    // Tell NN to bind the wildcard address.
+    conf.set(DFS_JOURNALNODE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
+
+    // Verify that NN binds wildcard address now.
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    address = getRpcServerAddress(jn);
+    assertThat("Bind address " + address + " is not wildcard.",
+        address, is("/" + WILDCARD_ADDRESS));
+  }
+
+  @Test(timeout=300000)
+  public void testHttpBindHostKey() throws IOException {
+    LOG.info("Testing without " + DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
+
+    // NN should not bind the wildcard address by default.
+    conf.set(DFS_JOURNALNODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    String address = jn.getHttpAddress().toString();
+    assertFalse("HTTP Bind address not expected to be wildcard by default.",
+        address.startsWith(WILDCARD_ADDRESS));
+
+    LOG.info("Testing with " + DFS_JOURNALNODE_HTTP_BIND_HOST_KEY);
+
+    // Tell NN to bind the wildcard address.
+    conf.set(DFS_JOURNALNODE_HTTP_BIND_HOST_KEY, WILDCARD_ADDRESS);
+
+    // Verify that NN binds wildcard address now.
+    conf.set(DFS_JOURNALNODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    address = jn.getHttpAddress().toString();
+    assertTrue("HTTP Bind address " + address + " is not wildcard.",
+        address.startsWith(WILDCARD_ADDRESS));
+  }
+
+  private static final String BASEDIR = System.getProperty("test.build.dir",
+      "target/test-dir") + "/" +
+      TestJournalNodeRespectsBindHostKeys.class.getSimpleName();
+
+  private static void setupSsl() throws Exception {
+    Configuration conf = new Configuration();
+    conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+    conf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, "localhost:0");
+    conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
+
+    File base = new File(BASEDIR);
+    FileUtil.fullyDelete(base);
+    assertTrue(base.mkdirs());
+    final String keystoresDir = new File(BASEDIR).getAbsolutePath();
+    final String sslConfDir = KeyStoreTestUtil.getClasspathDir(
+        TestJournalNodeRespectsBindHostKeys.class);
+
+    KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
+  }
+
+  /**
+   * HTTPS test is different since we need to setup SSL configuration.
+   * NN also binds the wildcard address for HTTPS port by default so we must
+   * pick a different host/port combination.
+   * @throws Exception
+   */
+  @Test (timeout=300000)
+  public void testHttpsBindHostKey() throws Exception {
+    LOG.info("Testing behavior without " + DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
+
+    setupSsl();
+
+    conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
+
+    // NN should not bind the wildcard address by default.
+    conf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    String address = jn.getHttpsAddress().toString();
+    assertFalse("HTTP Bind address not expected to be wildcard by default.",
+        address.startsWith(WILDCARD_ADDRESS));
+
+    LOG.info("Testing behavior with " + DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY);
+
+    // Tell NN to bind the wildcard address.
+    conf.set(DFS_JOURNALNODE_HTTPS_BIND_HOST_KEY, WILDCARD_ADDRESS);
+
+    // Verify that NN binds wildcard address now.
+    conf.set(DFS_JOURNALNODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
+    jCluster = new MiniJournalCluster.Builder(conf).format(true)
+        .numJournalNodes(NUM_JN).build();
+    jn = jCluster.getJournalNode(0);
+    address = jn.getHttpsAddress().toString();
+    assertTrue("HTTP Bind address " + address + " is not wildcard.",
+        address.startsWith(WILDCARD_ADDRESS));
+  }
+}
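
Reviewer note: for anyone picking up the new bind-host keys, here is a minimal
standalone sketch of how a deployment would combine them. The key strings below
are assumptions based on the dfs.journalnode.* entries this patch adds to
hdfs-default.xml; the constants in DFSConfigKeys are the authoritative names.

// Hypothetical usage sketch; not part of the patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.qjournal.server.JournalNode;

public class JournalNodeBindHostSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // The *-address key keeps controlling the advertised host and port.
    conf.set("dfs.journalnode.http-address", "jn1.example.com:8480");
    // The new *-bind-host key asks the HTTP server to listen on all interfaces.
    conf.set("dfs.journalnode.http-bind-host", "0.0.0.0");
    JournalNode jn = new JournalNode();
    jn.setConf(conf);
    jn.start();
  }
}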

+ 104 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsDatasetImpl.java

@@ -20,17 +20,23 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.client.impl.BlockReaderTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -83,6 +89,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOUR
 import static org.hamcrest.core.Is.is;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.assertNull;
@@ -832,8 +840,21 @@ public class TestFsDatasetImpl {
   private ReplicaInfo createNewReplicaObj(ExtendedBlock block, FsDatasetImpl
       fsDataSetImpl) throws IOException {
     ReplicaInfo replicaInfo = fsDataSetImpl.getReplicaInfo(block);
-    FsVolumeSpi destVolume = null;
+    FsVolumeSpi destVolume = getDestinationVolume(block, fsDataSetImpl);
+    return fsDataSetImpl.copyReplicaToVolume(block, replicaInfo,
+        destVolume.obtainReference());
+  }
 
+  /**
+   * Finds a new destination volume for the given block, i.e. a volume other
+   * than the one that currently holds the replica.
+   *
+   * @param block         - Extended Block
+   * @param fsDataSetImpl - FsDatasetImpl reference
+   * @return the destination volume, or null if no other volume qualifies
+   * @throws IOException
+   */
+  private FsVolumeSpi getDestinationVolume(ExtendedBlock block, FsDatasetImpl
+      fsDataSetImpl) throws IOException {
+    FsVolumeSpi destVolume = null;
     final String srcStorageId = fsDataSetImpl.getVolume(block).getStorageID();
     try (FsVolumeReferences volumeReferences =
         fsDataSetImpl.getFsVolumeReferences()) {
@@ -844,8 +865,88 @@ public class TestFsDatasetImpl {
         }
       }
     }
-    return fsDataSetImpl.copyReplicaToVolume(block, replicaInfo,
-        destVolume.obtainReference());
+    return destVolume;
+  }
+
+  @Test(timeout = 300000)
+  public void testBlockReadOpWhileMovingBlock() throws IOException {
+    MiniDFSCluster cluster = null;
+    try {
+
+      // Setup cluster
+      conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(1)
+          .storageTypes(new StorageType[]{StorageType.DISK, StorageType.DISK})
+          .storagesPerDatanode(2)
+          .build();
+      FileSystem fs = cluster.getFileSystem();
+      DataNode dataNode = cluster.getDataNodes().get(0);
+
+      // Create test file with ASCII data
+      Path filePath = new Path("/tmp/testData");
+      String blockData = RandomStringUtils.randomAscii(512 * 4);
+      FSDataOutputStream fout = fs.create(filePath);
+      fout.writeBytes(blockData);
+      fout.close();
+      assertEquals(blockData, DFSTestUtil.readFile(fs, filePath));
+
+      ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
+      BlockReaderTestUtil util = new BlockReaderTestUtil(cluster, new
+          HdfsConfiguration(conf));
+      LocatedBlock blk = util.getFileBlocks(filePath, 512 * 2).get(0);
+      File[] blkFiles = cluster.getAllBlockFiles(block);
+
+      // Part 1: Read partial data from block
+      LOG.info("Reading partial data for block {} before moving it: ",
+          blk.getBlock().toString());
+      BlockReader blkReader = BlockReaderTestUtil.getBlockReader(
+          (DistributedFileSystem) fs, blk, 0, 512 * 2);
+      byte[] buf = new byte[512 * 2];
+      blkReader.read(buf, 0, 512);
+      assertEquals(blockData.substring(0, 512), new String(buf,
+          StandardCharsets.US_ASCII).substring(0, 512));
+
+      // Part 2: Move the block, then read the remaining data
+      FsDatasetImpl fsDataSetImpl = (FsDatasetImpl) dataNode.getFSDataset();
+      ReplicaInfo replicaInfo = fsDataSetImpl.getReplicaInfo(block);
+      FsVolumeSpi destVolume = getDestinationVolume(block, fsDataSetImpl);
+      assertNotNull("Destination volume should not be null.", destVolume);
+      fsDataSetImpl.moveBlock(block, replicaInfo, destVolume.obtainReference());
+      // Trigger block report to update block info in NN
+      cluster.triggerBlockReports();
+      blkReader.read(buf, 512, 512);
+      assertEquals(blockData.substring(0, 512 * 2), new String(buf,
+          StandardCharsets.US_ASCII).substring(0, 512 * 2));
+      blkReader = BlockReaderTestUtil.getBlockReader(
+          (DistributedFileSystem) fs,
+          blk, 0, blockData.length());
+      buf = new byte[512 * 4];
+      blkReader.read(buf, 0, 512 * 4);
+      assertEquals(blockData, new String(buf, StandardCharsets.US_ASCII));
+
+      // Part 3: 1. Close the block reader
+      // 2. Assert source block doesn't exist on initial volume
+      // 3. Assert new file location for block is different
+      // 4. Confirm client can read data from new location
+      blkReader.close();
+      ExtendedBlock block2 = DFSTestUtil.getFirstBlock(fs, filePath);
+      File[] blkFiles2 = cluster.getAllBlockFiles(block2);
+      blk = util.getFileBlocks(filePath, 512 * 4).get(0);
+      blkReader = BlockReaderTestUtil.getBlockReader(
+          (DistributedFileSystem) fs,
+          blk, 0, blockData.length());
+      blkReader.read(buf, 0, 512 * 4);
+
+      assertFalse(Files.exists(Paths.get(blkFiles[0].getAbsolutePath())));
+      assertNotEquals(blkFiles[0], blkFiles2[0]);
+      assertEquals(blockData, new String(buf, StandardCharsets.US_ASCII));
+
+    } finally {
+      if (cluster != null && cluster.isClusterUp()) {
+        cluster.shutdown();
+      }
+    }
   }
 
 }
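
Reviewer note: the essence of the new read-while-moving test, distilled into a
helper for readers. This is a sketch only: it must live in the same
org.apache.hadoop.hdfs.server.datanode.fsdataset.impl package as the test to
see FsDatasetImpl, and all arguments come from a running MiniDFSCluster.

package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl;

import java.io.IOException;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

final class ReadWhileMoveSketch {
  private ReadWhileMoveSketch() {
  }

  /** Moves the replica while the given reader stays open, then drains it. */
  static int moveAndDrain(FsDatasetImpl dataset, ExtendedBlock block,
      FsVolumeSpi destVolume, BlockReader openReader, byte[] buf)
      throws IOException {
    ReplicaInfo replica = dataset.getReplicaInfo(block);
    // The move must not invalidate the reader that is already open.
    dataset.moveBlock(block, replica, destVolume.obtainReference());
    return openReader.read(buf, 0, buf.length);
  }
}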

+ 79 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java

@@ -26,6 +26,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHEC
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang.text.StrBuilder;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -56,17 +57,24 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.TestRefreshUserMappings;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.hadoop.util.ToolRunner;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
+import org.junit.Assert;
 
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -101,6 +109,7 @@ public class TestDFSAdmin {
   private final ByteArrayOutputStream err = new ByteArrayOutputStream();
   private static final PrintStream OLD_OUT = System.out;
   private static final PrintStream OLD_ERR = System.err;
+  private String tempResource = null;
 
   @Before
   public void setUp() throws Exception {
@@ -108,7 +117,7 @@ public class TestDFSAdmin {
     conf.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 3);
     restartCluster();
 
-    admin = new DFSAdmin();
+    admin = new DFSAdmin(conf);
   }
 
   private void redirectStream() {
@@ -137,6 +146,11 @@ public class TestDFSAdmin {
     }
 
     resetStream();
+    if (tempResource != null) {
+      File f = new File(tempResource);
+      FileUtils.deleteQuietly(f);
+      tempResource = null;
+    }
   }
 
   private void restartCluster() throws IOException {
@@ -923,4 +937,68 @@ public class TestDFSAdmin {
       cluster.shutdown();
     }
   }
+
+  @Test
+  public void testRefreshProxyUser() throws Exception {
+    Path dirPath = new Path("/testdir1");
+    Path subDirPath = new Path("/testdir1/subdir1");
+    UserGroupInformation loginUserUgi = UserGroupInformation.getLoginUser();
+    String proxyUser = "fakeuser";
+    String realUser = loginUserUgi.getShortUserName();
+
+    UserGroupInformation proxyUgi =
+        UserGroupInformation.createProxyUserForTesting(proxyUser,
+            loginUserUgi, loginUserUgi.getGroupNames());
+
+    // create a directory as login user and re-assign it to proxy user
+    loginUserUgi.doAs(new PrivilegedExceptionAction<Integer>() {
+      @Override
+      public Integer run() throws Exception {
+        cluster.getFileSystem().mkdirs(dirPath);
+        cluster.getFileSystem().setOwner(dirPath, proxyUser,
+            proxyUgi.getPrimaryGroupName());
+        return 0;
+      }
+    });
+
+    // Try creating a subdirectory inside the directory as the proxy user.
+    // This should fail because the real user is not yet allowed to
+    // impersonate the proxy user.
+    try {
+      proxyUgi.doAs(new PrivilegedExceptionAction<Integer>() {
+        @Override public Integer run() throws Exception {
+          cluster.getFileSystem().mkdirs(subDirPath);
+          return 0;
+        }
+      });
+      Assert.fail("mkdir as the proxy user should have failed before the"
+          + " impersonation configuration is refreshed");
+    } catch (RemoteException re) {
+      Assert.assertTrue(re.unwrapRemoteException()
+          instanceof AccessControlException);
+      Assert.assertEquals("User: " + realUser +
+          " is not allowed to impersonate " + proxyUser,
+          re.unwrapRemoteException().getMessage());
+    }
+
+    // refresh will look at configuration on the server side
+    // add additional resource with the new value
+    // so the server side will pick it up
+    String userKeyGroups = DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserGroupConfKey(realUser);
+    String userKeyHosts = DefaultImpersonationProvider.getTestProvider().
+        getProxySuperuserIpConfKey(realUser);
+    String rsrc = "testGroupMappingRefresh_rsrc.xml";
+    tempResource = TestRefreshUserMappings.addNewConfigResource(rsrc,
+        userKeyGroups, "*", userKeyHosts, "*");
+
+    String[] args = new String[]{"-refreshSuperUserGroupsConfiguration"};
+    admin.run(args);
+
+    // After the refresh, impersonating fakeuser is allowed, so mkdir should work
+    proxyUgi.doAs(new PrivilegedExceptionAction<Integer>() {
+      @Override
+      public Integer run() throws Exception {
+        cluster.getFileSystem().mkdirs(dirPath);
+        return 0;
+      }
+    });
+  }
 }
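
Reviewer note: a sketch of the configuration that -refreshSuperUserGroupsConfiguration
reloads in the new test. The hadoop.proxyuser.<user>.groups/hosts property names
are the standard impersonation keys; "alice" is a placeholder user.

// Hypothetical sketch; not part of the patch.
import org.apache.hadoop.conf.Configuration;

public class ProxyUserConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // DefaultImpersonationProvider#getProxySuperuserGroupConfKey(user) and
    // #getProxySuperuserIpConfKey(user) resolve to properties of this shape.
    conf.set("hadoop.proxyuser.alice.groups", "*");
    conf.set("hadoop.proxyuser.alice.hosts", "*");
    // The test writes such a resource onto the server classpath and then runs:
    //   hdfs dfsadmin -refreshSuperUserGroupsConfiguration
    System.out.println(conf.get("hadoop.proxyuser.alice.groups"));
  }
}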

+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java

@@ -194,10 +194,15 @@ public class TestOfflineImageViewer {
       dirCount++;
       writtenFiles.put(emptydir.toString(), hdfs.getFileStatus(emptydir));
 
-      //Create a directory whose name should be escaped in XML
+      //Create directories whose name should be escaped in XML
       Path invalidXMLDir = new Path("/dirContainingInvalidXMLChar\u0000here");
       hdfs.mkdirs(invalidXMLDir);
       dirCount++;
+      Path entityRefXMLDir = new Path("/dirContainingEntityRef&here");
+      hdfs.mkdirs(entityRefXMLDir);
+      dirCount++;
+      writtenFiles.put(entityRefXMLDir.toString(),
+          hdfs.getFileStatus(entityRefXMLDir));
 
       //Create a directory with sticky bits
       Path stickyBitDir = new Path("/stickyBit");

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java

@@ -208,7 +208,8 @@ public class TestRefreshUserMappings {
     // add additional resource with the new value
     // so the server side will pick it up
     String rsrc = "testGroupMappingRefresh_rsrc.xml";
-    addNewConfigResource(rsrc, userKeyGroups, "gr2", userKeyHosts, "127.0.0.1");
+    tempResource = addNewConfigResource(rsrc, userKeyGroups, "gr2",
+        userKeyHosts, "127.0.0.1");
 
     DFSAdmin admin = new DFSAdmin(config);
     String [] args = new String[]{"-refreshSuperUserGroupsConfiguration"};
@@ -232,7 +233,7 @@
 
   }
 
-  private void addNewConfigResource(String rsrcName, String keyGroup,
+  public static String addNewConfigResource(String rsrcName, String keyGroup,
       String groups, String keyHosts, String hosts)
           throws FileNotFoundException, UnsupportedEncodingException {
     // location for temp resource should be in CLASSPATH
@@ -242,17 +243,18 @@ public class TestRefreshUserMappings {
     String urlPath = URLDecoder.decode(url.getPath().toString(), "UTF-8");
     Path p = new Path(urlPath);
     Path dir = p.getParent();
-    tempResource = dir.toString() + "/" + rsrcName;
+    String tmp = dir.toString() + "/" + rsrcName;
 
     String newResource =
     "<configuration>"+
     "<property><name>" + keyGroup + "</name><value>"+groups+"</value></property>" +
     "<property><name>" + keyHosts + "</name><value>"+hosts+"</value></property>" +
     "</configuration>";
-    PrintWriter writer = new PrintWriter(new FileOutputStream(tempResource));
+    PrintWriter writer = new PrintWriter(new FileOutputStream(tmp));
     writer.println(newResource);
     writer.close();
 
     Configuration.addDefaultResource(rsrcName);
+    return tmp;
   }
 }

+ 0 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java


+ 0 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java


+ 0 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java


+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java

@@ -89,7 +89,7 @@ public class NotRunningJob implements MRClientProtocol {
     // used for a non running job
     return ApplicationReport.newInstance(unknownAppId, unknownAttemptId,
       "N/A", "N/A", "N/A", "N/A", 0, null, YarnApplicationState.NEW, "N/A",
-      "N/A", 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
+      "N/A", 0, 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
       YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
   }
 

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java

@@ -515,7 +515,7 @@ public class TestClientServiceDelegate {
         appId, 0);
     return ApplicationReport.newInstance(appId, attemptId, "user", "queue",
       "appname", "host", 124, null, YarnApplicationState.FINISHED,
-      "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null,
+      "diagnostics", "url", 0, 0, 0, FinalApplicationStatus.SUCCEEDED, null,
       "N/A", 0.0f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
       "N/A", 0.0f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
   }
   }
 
 
@@ -525,7 +525,7 @@ public class TestClientServiceDelegate {
         appId, 0);
     return ApplicationReport.newInstance(appId, attemptId, "user", "queue",
       "appname", host, port, null, YarnApplicationState.RUNNING, "diagnostics",
-      "url", 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
+      "url", 0, 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A", 0.0f,
       YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
   }
 

+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java

@@ -272,7 +272,8 @@ public class TestYARNRunner {
         .thenReturn(
             ApplicationReport.newInstance(appId, null, "tmp", "tmp", "tmp",
                 "tmp", 0, null, YarnApplicationState.FINISHED, "tmp", "tmp",
-                0l, 0l, FinalApplicationStatus.SUCCEEDED, null, null, 0f,
+                0L, 0L, 0L,
+                FinalApplicationStatus.SUCCEEDED, null, null, 0f,
                 "tmp", null));
                 "tmp", null));
     yarnRunner.killJob(jobId);
     yarnRunner.killJob(jobId);
     verify(clientDelegate).killJob(jobId);
     verify(clientDelegate).killJob(jobId);

+ 1 - 1
hadoop-project/pom.xml

@@ -1056,7 +1056,7 @@
       <dependency>
         <groupId>org.apache.commons</groupId>
         <artifactId>commons-lang3</artifactId>
-        <version>3.4</version>
+        <version>3.7</version>
       </dependency>
       <dependency>
         <groupId>org.slf4j</groupId>

+ 1 - 0
hadoop-project/src/site/site.xml

@@ -176,6 +176,7 @@
       <item name="Concepts" href="hadoop-yarn/hadoop-yarn-site/yarn-service/Concepts.html"/>
       <item name="Concepts" href="hadoop-yarn/hadoop-yarn-site/yarn-service/Concepts.html"/>
       <item name="Yarn Service API" href="hadoop-yarn/hadoop-yarn-site/yarn-service/YarnServiceAPI.html"/>
       <item name="Yarn Service API" href="hadoop-yarn/hadoop-yarn-site/yarn-service/YarnServiceAPI.html"/>
       <item name="Service Discovery" href="hadoop-yarn/hadoop-yarn-site/yarn-service/ServiceDiscovery.html"/>
       <item name="Service Discovery" href="hadoop-yarn/hadoop-yarn-site/yarn-service/ServiceDiscovery.html"/>
+      <item name="System Services" href="hadoop-yarn/hadoop-yarn-site/yarn-service/SystemServices.html"/>
     </menu>
 
     <menu name="Hadoop Compatible File Systems" inherit="top">

+ 30 - 0
hadoop-tools/hadoop-aliyun/src/site/resources/css/site.css

@@ -0,0 +1,30 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
+

+ 30 - 0
hadoop-tools/hadoop-aws/src/site/resources/css/site.css

@@ -0,0 +1,30 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
+

+ 30 - 0
hadoop-tools/hadoop-azure-datalake/src/site/resources/css/site.css

@@ -0,0 +1,30 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
+

+ 30 - 0
hadoop-tools/hadoop-azure/src/site/resources/css/site.css

@@ -0,0 +1,30 @@
+/*
+* Licensed to the Apache Software Foundation (ASF) under one or more
+* contributor license agreements.  See the NOTICE file distributed with
+* this work for additional information regarding copyright ownership.
+* The ASF licenses this file to You under the Apache License, Version 2.0
+* (the "License"); you may not use this file except in compliance with
+* the License.  You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+#banner {
+  height: 93px;
+  background: none;
+}
+
+#bannerLeft img {
+  margin-left: 30px;
+  margin-top: 10px;
+}
+
+#bannerRight img {
+  margin: 17px;
+}
+

+ 1 - 0
hadoop-tools/hadoop-sls/pom.xml

@@ -137,6 +137,7 @@
             <exclude>src/test/resources/syn_stream.json</exclude>
             <exclude>src/test/resources/inputsls.json</exclude>
             <exclude>src/test/resources/nodes.json</exclude>
+            <exclude>src/test/resources/nodes-with-resources.json</exclude>
             <exclude>src/test/resources/exit-invariants.txt</exclude>
             <exclude>src/test/resources/ongoing-invariants.txt</exclude>
           </excludes>

+ 25 - 10
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java

@@ -292,21 +292,30 @@ public class SLSRunner extends Configured implements Tool {
         SLSConfiguration.NM_RESOURCE_UTILIZATION_RATIO,
         SLSConfiguration.NM_RESOURCE_UTILIZATION_RATIO_DEFAULT);
     // nm information (fetch from topology file, or from sls/rumen json file)
-    Set<String> nodeSet = new HashSet<String>();
+    Map<String, Resource> nodeResourceMap = new HashMap<>();
+    Set<? extends String> nodeSet;
     if (nodeFile.isEmpty()) {
       for (String inputTrace : inputTraces) {
-
         switch (inputType) {
         case SLS:
-          nodeSet.addAll(SLSUtils.parseNodesFromSLSTrace(inputTrace));
+          nodeSet = SLSUtils.parseNodesFromSLSTrace(inputTrace);
+          for (String node : nodeSet) {
+            nodeResourceMap.put(node, null);
+          }
           break;
         case RUMEN:
-          nodeSet.addAll(SLSUtils.parseNodesFromRumenTrace(inputTrace));
+          nodeSet = SLSUtils.parseNodesFromRumenTrace(inputTrace);
+          for (String node : nodeSet) {
+            nodeResourceMap.put(node, null);
+          }
           break;
         case SYNTH:
           stjp = new SynthTraceJobProducer(getConf(), new Path(inputTraces[0]));
-          nodeSet.addAll(SLSUtils.generateNodes(stjp.getNumNodes(),
-              stjp.getNumNodes()/stjp.getNodesPerRack()));
+          nodeSet = SLSUtils.generateNodes(stjp.getNumNodes(),
+              stjp.getNumNodes()/stjp.getNodesPerRack());
+          for (String node : nodeSet) {
+            nodeResourceMap.put(node, null);
+          }
           break;
         default:
           throw new YarnException("Input configuration not recognized, "
@@ -314,20 +323,26 @@ public class SLSRunner extends Configured implements Tool {
         }
       }
     } else {
-      nodeSet.addAll(SLSUtils.parseNodesFromNodeFile(nodeFile));
+      nodeResourceMap = SLSUtils.parseNodesFromNodeFile(nodeFile,
+          nodeManagerResource);
     }
 
-    if (nodeSet.size() == 0) {
+    if (nodeResourceMap.size() == 0) {
       throw new YarnException("No node! Please configure nodes.");
       throw new YarnException("No node! Please configure nodes.");
     }
     }
 
 
     // create NM simulators
     Random random = new Random();
     Set<String> rackSet = new HashSet<String>();
-    for (String hostName : nodeSet) {
+    for (Map.Entry<String, Resource> entry : nodeResourceMap.entrySet()) {
       // we randomize the heartbeat start time from zero to 1 interval
       NMSimulator nm = new NMSimulator();
-      nm.init(hostName, nodeManagerResource, random.nextInt(heartbeatInterval),
+      Resource nmResource = nodeManagerResource;
+      String hostName = entry.getKey();
+      if (entry.getValue() != null) {
+        nmResource = entry.getValue();
+      }
+      nm.init(hostName, nmResource, random.nextInt(heartbeatInterval),
           heartbeatInterval, rm, resourceUtilizationRatio);
       nmMap.put(nm.getNode().getNodeID(), nm);
       runner.schedule(nm);

+ 19 - 5
hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/utils/SLSUtils.java

@@ -22,6 +22,8 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.io.Reader;
+
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -39,7 +41,11 @@ import org.apache.hadoop.tools.rumen.JobTraceReader;
 import org.apache.hadoop.tools.rumen.LoggedJob;
 import org.apache.hadoop.tools.rumen.LoggedTask;
 import org.apache.hadoop.tools.rumen.LoggedTaskAttempt;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.sls.conf.SLSConfiguration;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.apache.hadoop.yarn.util.resource.Resources;
 
 @Private
 @Unstable
@@ -145,9 +151,9 @@ public class SLSUtils {
   /**
    * parse the input node file, return each host name
    */
-  public static Set<String> parseNodesFromNodeFile(String nodeFile)
-          throws IOException {
-    Set<String> nodeSet = new HashSet<String>();
+  public static Map<String, Resource> parseNodesFromNodeFile(String nodeFile,
+      Resource nmDefaultResource) throws IOException {
+    Map<String, Resource> nodeResourceMap = new HashMap<>();
     JsonFactory jsonF = new JsonFactory();
     ObjectMapper mapper = new ObjectMapper();
     Reader input =
@@ -160,13 +166,21 @@ public class SLSUtils {
         List tasks = (List) jsonE.get("nodes");
         List tasks = (List) jsonE.get("nodes");
         for (Object o : tasks) {
         for (Object o : tasks) {
           Map jsonNode = (Map) o;
           Map jsonNode = (Map) o;
-          nodeSet.add(rack + "/" + jsonNode.get("node"));
+          Resource nodeResource = Resources.clone(nmDefaultResource);
+          ResourceInformation[] infors = ResourceUtils.getResourceTypesArray();
+          for (ResourceInformation info : infors) {
+            if (jsonNode.get(info.getName()) != null) {
+              nodeResource.setResourceValue(info.getName(),
+                  Integer.parseInt(jsonNode.get(info.getName()).toString()));
+            }
+          }
+          nodeResourceMap.put(rack + "/" + jsonNode.get("node"), nodeResource);
         }
       }
     } finally {
       input.close();
     }
-    return nodeSet;
+    return nodeResourceMap;
   }
 
   public static Set<? extends String> generateNodes(int numNodes,

+ 25 - 0
hadoop-tools/hadoop-sls/src/test/java/org/apache/hadoop/yarn/sls/utils/TestSLSUtils.java

@@ -18,10 +18,13 @@
 
 package org.apache.hadoop.yarn.sls.utils;
 
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.util.resource.Resources;
 import org.junit.Assert;
 import org.junit.Test;
 
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 
 public class TestSLSUtils {
@@ -39,6 +42,28 @@ public class TestSLSUtils {
     Assert.assertEquals(rackHostname[1], "node1");
   }
 
+  @Test
+  public void testParseNodesFromNodeFile() throws Exception {
+    String nodeFile = "src/test/resources/nodes.json";
+    Map<String, Resource> nodeResourceMap = SLSUtils.parseNodesFromNodeFile(
+        nodeFile, Resources.createResource(1024, 2));
+    Assert.assertEquals(20, nodeResourceMap.size());
+
+    nodeFile = "src/test/resources/nodes-with-resources.json";
+    nodeResourceMap = SLSUtils.parseNodesFromNodeFile(
+        nodeFile, Resources.createResource(1024, 2));
+    Assert.assertEquals(4,
+        nodeResourceMap.size());
+    Assert.assertEquals(2048,
+        nodeResourceMap.get("/rack1/node1").getMemorySize());
+    Assert.assertEquals(6,
+        nodeResourceMap.get("/rack1/node1").getVirtualCores());
+    Assert.assertEquals(1024,
+        nodeResourceMap.get("/rack1/node2").getMemorySize());
+    Assert.assertEquals(2,
+        nodeResourceMap.get("/rack1/node2").getVirtualCores());
+  }
+
   @Test
   public void testGenerateNodes() {
     Set<? extends String> nodes = SLSUtils.generateNodes(3, 3);

+ 19 - 0
hadoop-tools/hadoop-sls/src/test/resources/nodes-with-resources.json

@@ -0,0 +1,19 @@
+{
+  "rack": "rack1",
+  "nodes": [
+    {
+      "node": "node1",
+      "memory-mb" : 2048,
+      "vcores" : 6
+    },
+    {
+      "node": "node2"
+    },
+    {
+      "node": "node3"
+    },
+    {
+      "node": "node4"
+    }
+  ]
+}
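
Reviewer note: a sketch of how the new per-node resource file is consumed.
Nodes that omit "memory-mb"/"vcores" (node2..node4 above) fall back to the
default Resource passed in, as the new TestSLSUtils case asserts.

// Hypothetical sketch; not part of the patch.
import java.util.Map;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.sls.utils.SLSUtils;
import org.apache.hadoop.yarn.util.resource.Resources;

public class NodeResourceFileSketch {
  public static void main(String[] args) throws Exception {
    Map<String, Resource> nodes = SLSUtils.parseNodesFromNodeFile(
        "src/test/resources/nodes-with-resources.json",
        Resources.createResource(1024, 2));
    System.out.println(nodes.get("/rack1/node1")); // 2048 MB, 6 vcores
    System.out.println(nodes.get("/rack1/node2")); // default: 1024 MB, 2 vcores
  }
}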

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/bin/FederationStateStore/MySQL/FederationStateStoreTables.sql

@@ -22,7 +22,7 @@ USE FederationStateStore
 
 CREATE TABLE applicationsHomeSubCluster(
    applicationId varchar(64) NOT NULL,
-   subClusterId varchar(256) NULL,
+   homeSubCluster varchar(256) NULL,
    CONSTRAINT pk_applicationId PRIMARY KEY (applicationId)
 );
 

+ 41 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java

@@ -58,7 +58,8 @@ public abstract class ApplicationReport {
       ApplicationAttemptId applicationAttemptId, String user, String queue,
       String name, String host, int rpcPort, Token clientToAMToken,
       YarnApplicationState state, String diagnostics, String url,
-      long startTime, long finishTime, FinalApplicationStatus finalStatus,
+      long startTime, long launchTime, long finishTime,
+      FinalApplicationStatus finalStatus,
       ApplicationResourceUsageReport appResources, String origTrackingUrl,
       float progress, String applicationType, Token amRmToken) {
     ApplicationReport report = Records.newRecord(ApplicationReport.class);
@@ -74,6 +75,7 @@ public abstract class ApplicationReport {
     report.setDiagnostics(diagnostics);
     report.setTrackingUrl(url);
     report.setStartTime(startTime);
+    report.setLaunchTime(launchTime);
     report.setFinishTime(finishTime);
     report.setFinalApplicationStatus(finalStatus);
     report.setApplicationResourceUsageReport(appResources);
@@ -84,13 +86,40 @@ public abstract class ApplicationReport {
     return report;
   }
 
+
+  @Private
+  @Unstable
+  public static ApplicationReport newInstance(ApplicationId applicationId,
+        ApplicationAttemptId applicationAttemptId, String user, String queue,
+        String name, String host, int rpcPort, Token clientToAMToken,
+        YarnApplicationState state, String diagnostics, String url,
+        long startTime, long finishTime,
+        FinalApplicationStatus finalStatus,
+        ApplicationResourceUsageReport appResources, String origTrackingUrl,
+        float progress, String applicationType, Token amRmToken,
+        Set<String> tags, boolean unmanagedApplication, Priority priority,
+        String appNodeLabelExpression, String amNodeLabelExpression) {
+    ApplicationReport report =
+            newInstance(applicationId, applicationAttemptId, user, queue, name,
+                    host, rpcPort, clientToAMToken, state, diagnostics, url,
+                    startTime, 0, finishTime, finalStatus, appResources,
+                    origTrackingUrl, progress, applicationType, amRmToken);
+    report.setApplicationTags(tags);
+    report.setUnmanagedApp(unmanagedApplication);
+    report.setPriority(priority);
+    report.setAppNodeLabelExpression(appNodeLabelExpression);
+    report.setAmNodeLabelExpression(amNodeLabelExpression);
+    return report;
+  }
+
   @Private
   @Unstable
   public static ApplicationReport newInstance(ApplicationId applicationId,
       ApplicationAttemptId applicationAttemptId, String user, String queue,
       String name, String host, int rpcPort, Token clientToAMToken,
       YarnApplicationState state, String diagnostics, String url,
-      long startTime, long finishTime, FinalApplicationStatus finalStatus,
+      long startTime, long launchTime, long finishTime,
+      FinalApplicationStatus finalStatus,
       ApplicationResourceUsageReport appResources, String origTrackingUrl,
       float progress, String applicationType, Token amRmToken, Set<String> tags,
       boolean unmanagedApplication, Priority priority,
@@ -98,8 +127,8 @@ public abstract class ApplicationReport {
     ApplicationReport report =
         newInstance(applicationId, applicationAttemptId, user, queue, name,
           host, rpcPort, clientToAMToken, state, diagnostics, url, startTime,
-          finishTime, finalStatus, appResources, origTrackingUrl, progress,
-          applicationType, amRmToken);
+          launchTime, finishTime, finalStatus, appResources,
+          origTrackingUrl, progress, applicationType, amRmToken);
     report.setApplicationTags(tags);
     report.setUnmanagedApp(unmanagedApplication);
     report.setPriority(priority);
@@ -282,6 +311,14 @@ public abstract class ApplicationReport {
   @Unstable
   public abstract void setStartTime(long startTime);
 
+  @Private
+  @Unstable
+  public abstract void setLaunchTime(long launchTime);
+
+  @Public
+  @Unstable
+  public abstract long getLaunchTime();
+
   /**
    * Get the <em>finish time</em> of the application.
    * @return <em>finish time</em> of the application
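
Reviewer note: the widened factory including the new launchTime argument, for
reference. Times below are placeholders; callers that stay on the shorter
overload get launchTime = 0, as the compatibility method above shows.

// Hypothetical sketch; not part of the patch.
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LaunchTimeReportSketch {
  public static ApplicationReport sample() {
    ApplicationId appId = ApplicationId.newInstance(1L, 1);
    long start = 1000L, launch = 1500L, finish = 2000L; // start <= launch <= finish
    return ApplicationReport.newInstance(appId, null, "user", "queue",
        "app", "host", 0, null, YarnApplicationState.FINISHED, "", "url",
        start, launch, finish, FinalApplicationStatus.SUCCEEDED, null,
        "N/A", 1.0f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null);
  }
}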

+ 1 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto

@@ -281,6 +281,7 @@ message ApplicationReportProto {
   optional string appNodeLabelExpression = 24;
   optional string amNodeLabelExpression = 25;
   repeated AppTimeoutsMapProto appTimeouts = 26;
+  optional int64 launchTime = 27;
 }
 
 message AppTimeoutsMapProto {

+ 16 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/SystemServiceManagerImpl.java

@@ -92,10 +92,12 @@ public class SystemServiceManagerImpl extends AbstractService
   private Thread serviceLaucher;
 
   @VisibleForTesting
-  private int skipCounter;
+  private int badFileNameExtensionSkipCounter;
   @VisibleForTesting
   private Map<String, Integer> ignoredUserServices =
       new HashMap<>();
+  @VisibleForTesting
+  private int badDirSkipCounter;
 
   public SystemServiceManagerImpl() {
     super(SystemServiceManagerImpl.class.getName());
@@ -268,6 +270,7 @@ public class SystemServiceManagerImpl extends AbstractService
         } else if (launchType.getPath().getName().equals(ASYNC)) {
           scanForUserServiceDefinition(launchType.getPath(), asyncUserServices);
         } else {
+          badDirSkipCounter++;
           LOG.debug("Scanner skips for unknown dir {}.", launchType.getPath());
           LOG.debug("Scanner skips for unknown dir {}.", launchType.getPath());
         }
         }
       }
       }
@@ -308,7 +311,7 @@ public class SystemServiceManagerImpl extends AbstractService
         if (!filename.endsWith(YARN_FILE_SUFFIX)) {
           LOG.info("Scanner skips for unknown file extension, filename = {}",
               filename);
-          skipCounter++;
+          badFileNameExtensionSkipCounter++;
           continue;
         }
         Service service = getServiceDefinition(serviceCache.getPath());
@@ -325,9 +328,10 @@ public class SystemServiceManagerImpl extends AbstractService
             LOG.warn(
                 "Ignoring service {} for the user {} as it is already present,"
                     + " filename = {}", service.getName(), userName, filename);
+          } else {
+            LOG.info("Added service {} for the user {}, filename = {}",
+                service.getName(), userName, filename);
           }
-          LOG.info("Added service {} for the user {}, filename = {}",
-              service.getName(), userName, filename);
         }
       }
     }
@@ -375,7 +379,13 @@ public class SystemServiceManagerImpl extends AbstractService
     return syncUserServices;
   }
 
-  @VisibleForTesting int getSkipCounter() {
-    return skipCounter;
+  @VisibleForTesting
+  int getBadFileNameExtensionSkipCounter() {
+    return badFileNameExtensionSkipCounter;
+  }
+
+  @VisibleForTesting
+  int getBadDirSkipCounter() {
+    return badDirSkipCounter;
   }
 }

+ 6 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/main/resources/definition/YARN-Simplified-V1-API-Layer-For-Services.yaml

@@ -251,6 +251,9 @@ definitions:
       kerberos_principal:
         description: The Kerberos Principal of the service
         $ref: '#/definitions/KerberosPrincipal'
+      docker_client_config:
+        type: string
+        description: URI of the file containing the docker client configuration (e.g. hdfs:///tmp/config.json).
   ResourceInformation:
     description:
       ResourceInformation determines unit/value of resource types in addition to memory and vcores. It will be part of Resource object.
@@ -422,14 +425,15 @@ definitions:
           type: string
         description: A list of quicklink keys defined at the service level, and to be resolved by this component.
   ReadinessCheck:
-    description: A custom command or a pluggable helper container to determine the readiness of a container of a component. Readiness for every service is different. Hence the need for a simple interface, with scope to support advanced usecases.
+    description: A check to be performed to determine the readiness of a component instance (a container). If no readiness check is specified, the default readiness check will be used unless the yarn.service.default-readiness-check.enabled configuration property is set to false at the component, service, or system level. The artifact field is currently unsupported but may be implemented in the future, enabling a pluggable helper container to support advanced use cases.
     required:
     - type
     properties:
       type:
         type: string
-        description: E.g. HTTP (YARN will perform a simple REST call at a regular interval and expect a 204 No content).
+        description: DEFAULT (AM checks whether the container has an IP and optionally performs a DNS lookup for the container hostname), HTTP (AM performs default checks, plus sends a REST call to the container and expects a response code between 200 and 299), or PORT (AM performs default checks, plus attempts to open a socket connection to the container on a specified port).
         enum:
+          - DEFAULT
           - HTTP
           - PORT
       properties:

+ 44 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java

@@ -19,6 +19,9 @@ package org.apache.hadoop.yarn.service;
 
 import static org.junit.Assert.*;
 
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileWriter;
 import java.util.ArrayList;
 import java.util.List;
 
@@ -27,6 +30,7 @@ import javax.ws.rs.Path;
 import javax.ws.rs.core.Response;
 import javax.ws.rs.core.Response.Status;
 
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.service.api.records.Artifact;
 import org.apache.hadoop.yarn.service.api.records.Artifact.TypeEnum;
@@ -90,7 +94,19 @@ public class TestApiServer {
   }
 
   @Test
-  public void testGoodCreateService() {
+  public void testGoodCreateService() throws Exception {
+    String json = "{\"auths\": "
+        + "{\"https://index.docker.io/v1/\": "
+        + "{\"auth\": \"foobarbaz\"},"
+        + "\"registry.example.com\": "
+        + "{\"auth\": \"bazbarfoo\"}}}";
+    File dockerTmpDir = new File("target", "docker-tmp");
+    FileUtils.deleteQuietly(dockerTmpDir);
+    dockerTmpDir.mkdirs();
+    String dockerConfig = dockerTmpDir + "/config.json";
+    BufferedWriter bw = new BufferedWriter(new FileWriter(dockerConfig));
+    bw.write(json);
+    bw.close();
     Service service = new Service();
     service.setName("jenkins");
     service.setVersion("v1");
@@ -115,6 +131,33 @@ public class TestApiServer {
         actual.getStatus());
   }
 
+  @Test
+  public void testInternalServerErrorDockerClientConfigMissingCreateService() {
+    Service service = new Service();
+    service.setName("jenkins");
+    service.setVersion("v1");
+    service.setDockerClientConfig("/does/not/exist/config.json");
+    Artifact artifact = new Artifact();
+    artifact.setType(TypeEnum.DOCKER);
+    artifact.setId("jenkins:latest");
+    Resource resource = new Resource();
+    resource.setCpus(1);
+    resource.setMemory("2048");
+    List<Component> components = new ArrayList<>();
+    Component c = new Component();
+    c.setName("jenkins");
+    c.setNumberOfContainers(1L);
+    c.setArtifact(artifact);
+    c.setLaunchCommand("");
+    c.setResource(resource);
+    components.add(c);
+    service.setComponents(components);
+    final Response actual = apiServer.createService(request, service);
+    assertEquals("Create service is ",
+        Response.status(Status.BAD_REQUEST).build().getStatus(),
+        actual.getStatus());
+  }
+
   @Test
   public void testBadGetService() {
     final Response actual = apiServer.getService(request, "no-jenkins");

+ 7 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceImpl.java → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/client/TestSystemServiceManagerImpl.java

@@ -41,13 +41,13 @@ import java.util.Set;
 /**
  * Test class for system service manager.
  */
-public class TestSystemServiceImpl {
+public class TestSystemServiceManagerImpl {
 
   private static final Logger LOG =
-      LoggerFactory.getLogger(TestSystemServiceImpl.class);
+      LoggerFactory.getLogger(TestSystemServiceManagerImpl.class);
   private SystemServiceManagerImpl systemService;
   private Configuration conf;
-  private String resourcePath = "users";
+  private String resourcePath = "system-services";
 
   private String[] users = new String[] {"user1", "user2"};
   private static Map<String, Set<String>> loadedServices = new HashMap<>();
@@ -88,7 +88,9 @@ public class TestSystemServiceImpl {
         ignoredUserServices.containsKey(users[0]));
     int count = ignoredUserServices.get(users[0]);
     Assert.assertEquals(1, count);
-    Assert.assertEquals(1, systemService.getSkipCounter());
+    Assert.assertEquals(1,
+        systemService.getBadFileNameExtensionSkipCounter());
+    Assert.assertEquals(1, systemService.getBadDirSkipCounter());
 
     Map<String, Set<Service>> userServices =
         systemService.getSyncUserServices();
@@ -112,7 +114,7 @@ public class TestSystemServiceImpl {
       while (iterator.hasNext()) {
         Service next = iterator.next();
         Assert.assertTrue(
-            "Service name doesn't exist in expected " + "userService "
+            "Service name doesn't exist in expected userService "
                 + serviceNames, serviceNames.contains(next.getName()));
       }
     }

+ 16 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/bad/bad.yarnfile

@@ -0,0 +1,16 @@
+{
+  "name": "bad",
+  "version": "1.0.0",
+  "components" :
+  [
+    {
+      "name": "simple",
+      "number_of_containers": 1,
+      "launch_command": "sleep 2",
+      "resource": {
+        "cpus": 1,
+        "memory": "128"
+      }
+    }
+  ]
+}

+ 0 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user1/example-app1.yarnfile → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app1.yarnfile


+ 0 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user1/example-app2.yarnfile → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app2.yarnfile


+ 0 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user1/example-app3.json → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user1/example-app3.json


+ 0 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user2/example-app1.yarnfile → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app1.yarnfile


+ 0 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/users/sync/user2/example-app2.yarnfile → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/resources/system-services/sync/user2/example-app2.yarnfile


+ 5 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ContainerFailureTracker.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.service;
 
 import org.apache.hadoop.yarn.service.component.Component;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -29,6 +30,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_NODE_BLACKLIST_THRESHOLD;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.NODE_BLACKLIST_THRESHOLD;
 
 /**
@@ -51,8 +53,9 @@ public class ContainerFailureTracker {
   public ContainerFailureTracker(ServiceContext context, Component component) {
     this.context = context;
     this.component = component;
-    maxFailurePerNode = component.getComponentSpec().getConfiguration()
-        .getPropertyInt(NODE_BLACKLIST_THRESHOLD, 3);
+    maxFailurePerNode = YarnServiceConf.getInt(NODE_BLACKLIST_THRESHOLD,
+        DEFAULT_NODE_BLACKLIST_THRESHOLD, component.getComponentSpec()
+        .getConfiguration(), context.scheduler.getConfig());
   }
 
 

+ 10 - 8
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceMaster.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.yarn.service;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Options;
 import org.apache.commons.lang3.StringUtils;
@@ -89,8 +90,8 @@ public class ServiceMaster extends CompositeService {
     fs.setAppDir(appDir);
     loadApplicationJson(context, fs);
 
+    context.tokens = recordTokensForContainers();
     if (UserGroupInformation.isSecurityEnabled()) {
-      context.tokens = recordTokensForContainers();
       doSecureLogin();
     }
     // Take yarn config from YarnFile and merge them into YarnConfiguration
@@ -128,15 +129,10 @@
 
   // Record the tokens and use them for launching containers.
   // e.g. localization requires the hdfs delegation tokens
-  private ByteBuffer recordTokensForContainers() throws IOException {
+  @VisibleForTesting
+  protected ByteBuffer recordTokensForContainers() throws IOException {
     Credentials copy = new Credentials(UserGroupInformation.getCurrentUser()
         .getCredentials());
-    DataOutputBuffer dob = new DataOutputBuffer();
-    try {
-      copy.writeTokenStorageToStream(dob);
-    } finally {
-      dob.close();
-    }
     // Now remove the AM->RM token so that task containers cannot access it.
     Iterator<Token<?>> iter = copy.getAllTokens().iterator();
     while (iter.hasNext()) {
@@ -146,6 +142,12 @@ public class ServiceMaster extends CompositeService {
         iter.remove();
       }
     }
+    DataOutputBuffer dob = new DataOutputBuffer();
+    try {
+      copy.writeTokenStorageToStream(dob);
+    } finally {
+      dob.close();
+    }
     return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
   }
 

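Worth noting in the ServiceMaster hunks above: recordTokensForContainers() now runs whether or not security is enabled, and the token buffer is serialized only after the AM->RM token has been removed; writing first (the old order) would leak that token to task containers. A minimal sketch of the corrected ordering, assuming the standard Hadoop Credentials and AMRMTokenIdentifier APIs:

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Iterator;

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;

public final class TokenFilterSketch {
  // Filter first, serialize second: the buffer handed to containers must
  // never contain the AM->RM token. The kind check mirrors the usual
  // AMRMTokenIdentifier.KIND_NAME comparison, an assumption of this sketch.
  static ByteBuffer containerTokens(Credentials current) throws IOException {
    Credentials copy = new Credentials(current);
    Iterator<Token<?>> iter = copy.getAllTokens().iterator();
    while (iter.hasNext()) {
      if (iter.next().getKind().equals(AMRMTokenIdentifier.KIND_NAME)) {
        iter.remove();
      }
    }
    DataOutputBuffer dob = new DataOutputBuffer();
    try {
      copy.writeTokenStorageToStream(dob);
    } finally {
      dob.close();
    }
    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
  }
}
```
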
+ 3 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java

@@ -234,9 +234,10 @@ public class ServiceScheduler extends CompositeService {
     createConfigFileCache(context.fs.getFileSystem());
 
     createAllComponents();
-    containerRecoveryTimeout = getConfig().getInt(
+    containerRecoveryTimeout = YarnServiceConf.getInt(
         YarnServiceConf.CONTAINER_RECOVERY_TIMEOUT_MS,
-        YarnServiceConf.DEFAULT_CONTAINER_RECOVERY_TIMEOUT_MS);
+        YarnServiceConf.DEFAULT_CONTAINER_RECOVERY_TIMEOUT_MS,
+        app.getConfiguration(), getConfig());
   }
 
   protected YarnRegistryViewForProviders createYarnRegistryOperations(

+ 1 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/ReadinessCheck.java

@@ -60,6 +60,7 @@ public class ReadinessCheck implements Serializable {
   @XmlType(name = "type")
   @XmlType(name = "type")
   @XmlEnum
   @XmlEnum
   public enum TypeEnum {
   public enum TypeEnum {
+    DEFAULT("DEFAULT"),
     HTTP("HTTP"),
     HTTP("HTTP"),
     PORT("PORT");
     PORT("PORT");
 
 

+ 24 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Service.java

@@ -73,6 +73,7 @@ public class Service extends BaseResource {
   private KerberosPrincipal kerberosPrincipal = new KerberosPrincipal();
   private String version = null;
   private String description = null;
+  private String dockerClientConfig = null;
 
   /**
    * A unique service name.
@@ -370,6 +371,27 @@ public class Service extends BaseResource {
     this.kerberosPrincipal = kerberosPrincipal;
   }
 
+  @JsonProperty("docker_client_config")
+  @XmlElement(name = "docker_client_config")
+  @SuppressWarnings("checkstyle:hiddenfield")
+  public Service dockerClientConfig(String dockerClientConfig) {
+    this.dockerClientConfig = dockerClientConfig;
+    return this;
+  }
+
+  /**
+   * The Docker client config for the service.
+   * @return dockerClientConfig
+   */
+  @ApiModelProperty(value = "The Docker client config for the service")
+  public String getDockerClientConfig() {
+    return dockerClientConfig;
+  }
+
+  public void setDockerClientConfig(String dockerClientConfig) {
+    this.dockerClientConfig = dockerClientConfig;
+  }
+
   @Override
   public boolean equals(java.lang.Object o) {
     if (this == o) {
@@ -414,6 +436,8 @@ public class Service extends BaseResource {
     sb.append("    queue: ").append(toIndentedString(queue)).append("\n");
     sb.append("    queue: ").append(toIndentedString(queue)).append("\n");
     sb.append("    kerberosPrincipal: ")
     sb.append("    kerberosPrincipal: ")
         .append(toIndentedString(kerberosPrincipal)).append("\n");
         .append(toIndentedString(kerberosPrincipal)).append("\n");
+    sb.append("    dockerClientConfig: ")
+        .append(toIndentedString(dockerClientConfig)).append("\n");
     sb.append("}");
     sb.append("}");
     return sb.toString();
     return sb.toString();
   }
   }

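Service gains a docker_client_config field with both a fluent and a plain setter, serialized as "docker_client_config" in the YarnFile JSON. A hedged usage sketch; the class name and the HDFS path below are examples, not taken from the commit:

```java
import org.apache.hadoop.yarn.service.api.records.Service;

public final class DockerClientConfigExample {
  public static void main(String[] args) {
    Service service = new Service();
    service.setName("jenkins");
    service.setVersion("v1");
    // Hypothetical path; any filesystem the cluster can read works.
    service.setDockerClientConfig("hdfs:///user/jenkins/.docker/config.json");

    // The fluent form added in the same hunk returns the Service itself.
    Service same = new Service()
        .dockerClientConfig("hdfs:///user/jenkins/.docker/config.json");
    same.setName("jenkins2");
  }
}
```
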
+ 3 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ClientAMProxy.java

@@ -48,8 +48,9 @@ public class ClientAMProxy extends ServerProxy{
     } else {
       retryPolicy =
           createRetryPolicy(conf, YarnServiceConf.CLIENT_AM_RETRY_MAX_WAIT_MS,
-              15 * 60 * 1000, YarnServiceConf.CLIENT_AM_RETRY_MAX_INTERVAL_MS,
-              2 * 1000);
+              YarnServiceConf.DEFAULT_CLIENT_AM_RETRY_MAX_WAIT_MS,
+              YarnServiceConf.CLIENT_AM_RETRY_MAX_INTERVAL_MS,
+              YarnServiceConf.DEFAULT_CLIENT_AM_RETRY_MAX_INTERVAL_MS);
     }
     return createRetriableProxy(confClone, protocol, ugi, rpc, serverAddress,
         retryPolicy);

+ 33 - 23
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java

@@ -79,6 +79,7 @@ import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;
 import org.apache.hadoop.yarn.service.utils.ServiceUtils;
 import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.apache.hadoop.yarn.service.utils.ZookeeperUtils;
+import org.apache.hadoop.yarn.util.DockerClientConfigHandler;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.Times;
 import org.slf4j.Logger;
@@ -668,8 +669,8 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
       submissionContext.setApplicationTimeouts(appTimeout);
     }
     submissionContext.setMaxAppAttempts(YarnServiceConf
-        .getInt(YarnServiceConf.AM_RESTART_MAX, 20, app.getConfiguration(),
-            conf));
+        .getInt(YarnServiceConf.AM_RESTART_MAX, DEFAULT_AM_RESTART_MAX, app
+            .getConfiguration(), conf));
 
     setLogAggregationContext(app, conf, submissionContext);
 
@@ -695,7 +696,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
             conf), 1));
     String queue = app.getQueue();
     if (StringUtils.isEmpty(queue)) {
-      queue = conf.get(YARN_QUEUE, "default");
+      queue = conf.get(YARN_QUEUE, DEFAULT_YARN_QUEUE);
     }
     submissionContext.setQueue(queue);
     submissionContext.setApplicationName(serviceName);
@@ -710,7 +711,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     amLaunchContext.setCommands(Collections.singletonList(cmdStr));
     amLaunchContext.setEnvironment(env);
     amLaunchContext.setLocalResources(localResources);
-    addHdfsDelegationTokenIfSecure(amLaunchContext);
+    addCredentials(amLaunchContext, app);
     submissionContext.setAMContainerSpec(amLaunchContext);
     yarnClient.submitApplication(submissionContext);
     return submissionContext.getApplicationId();
@@ -933,28 +934,37 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
     return appDir;
   }
 
-  private void addHdfsDelegationTokenIfSecure(ContainerLaunchContext amContext)
+  private void addCredentials(ContainerLaunchContext amContext, Service app)
       throws IOException {
-    if (!UserGroupInformation.isSecurityEnabled()) {
-      return;
-    }
-    Credentials credentials = new Credentials();
-    String tokenRenewer = YarnClientUtils.getRmPrincipal(getConfig());
-    if (StringUtils.isEmpty(tokenRenewer)) {
-      throw new IOException(
-          "Can't get Master Kerberos principal for the RM to use as renewer");
-    }
-    // Get hdfs dt
-    final org.apache.hadoop.security.token.Token<?>[] tokens =
-        fs.getFileSystem().addDelegationTokens(tokenRenewer, credentials);
-    if (tokens != null && tokens.length != 0) {
-      for (Token<?> token : tokens) {
-        LOG.debug("Got DT: " + token);
+    Credentials allCreds = new Credentials();
+    // HDFS DT
+    if (UserGroupInformation.isSecurityEnabled()) {
+      String tokenRenewer = YarnClientUtils.getRmPrincipal(getConfig());
+      if (StringUtils.isEmpty(tokenRenewer)) {
+        throw new IOException(
+            "Can't get Master Kerberos principal for the RM to use as renewer");
+      }
+      final org.apache.hadoop.security.token.Token<?>[] tokens =
+          fs.getFileSystem().addDelegationTokens(tokenRenewer, allCreds);
+      if (LOG.isDebugEnabled()) {
+        if (tokens != null && tokens.length != 0) {
+          for (Token<?> token : tokens) {
+            LOG.debug("Got DT: " + token);
+          }
+        }
       }
+    }
+
+    if (!StringUtils.isEmpty(app.getDockerClientConfig())) {
+      allCreds.addAll(DockerClientConfigHandler.readCredentialsFromConfigFile(
+          new Path(app.getDockerClientConfig()), getConfig(), app.getName()));
+    }
+
+    if (allCreds.numberOfTokens() > 0) {
       DataOutputBuffer dob = new DataOutputBuffer();
-      credentials.writeTokenStorageToStream(dob);
-      ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
-      amContext.setTokens(fsTokens);
+      allCreds.writeTokenStorageToStream(dob);
+      ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+      amContext.setTokens(tokens);
     }
   }
 

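The reworked addCredentials merges two sources into one token buffer: HDFS delegation tokens (secure clusters only) and Docker registry credentials read from the client config (any cluster), and it only sets tokens on the launch context when at least one token was collected. A condensed sketch under the same APIs the hunk uses; the helper signature and renewer handling are simplifications of this sketch, not the committed method:

```java
import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.util.DockerClientConfigHandler;

public final class CredentialsSketch {
  static void addCredentials(ContainerLaunchContext amContext, Service app,
      FileSystem fs, Configuration conf, String renewer) throws IOException {
    Credentials allCreds = new Credentials();
    if (UserGroupInformation.isSecurityEnabled()) {
      fs.addDelegationTokens(renewer, allCreds); // HDFS delegation tokens
    }
    if (!StringUtils.isEmpty(app.getDockerClientConfig())) {
      allCreds.addAll(DockerClientConfigHandler.readCredentialsFromConfigFile(
          new Path(app.getDockerClientConfig()), conf, app.getName()));
    }
    if (allCreds.numberOfTokens() > 0) { // never set an empty buffer
      DataOutputBuffer dob = new DataOutputBuffer();
      allCreds.writeTokenStorageToStream(dob);
      amContext.setTokens(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
    }
  }
}
```
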
+ 13 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java

@@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.service.api.records.ServiceState;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEvent;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceId;
+import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
 import org.apache.hadoop.yarn.service.monitor.probe.MonitorUtils;
 import org.apache.hadoop.yarn.service.monitor.probe.Probe;
 import org.apache.hadoop.yarn.service.provider.ProviderUtils;
@@ -79,6 +80,9 @@ import static org.apache.hadoop.yarn.service.component.ComponentEventType.*;
 import static org.apache.hadoop.yarn.service.component.ComponentState.*;
 import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.*;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_THRESHOLD;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_CONTAINER_FAILURE_THRESHOLD;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_ENABLED;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_ENABLED_DEFAULT;
 
 public class Component implements EventHandler<ComponentEvent> {
   private static final Logger LOG = LoggerFactory.getLogger(Component.class);
@@ -175,9 +179,15 @@ public class Component implements EventHandler<ComponentEvent> {
     dispatcher = scheduler.getDispatcher();
     failureTracker =
         new ContainerFailureTracker(context, this);
-    probe = MonitorUtils.getProbe(componentSpec.getReadinessCheck());
-    maxContainerFailurePerComp = componentSpec.getConfiguration()
-        .getPropertyInt(CONTAINER_FAILURE_THRESHOLD, 10);
+    if (componentSpec.getReadinessCheck() != null ||
+        YarnServiceConf.getBoolean(DEFAULT_READINESS_CHECK_ENABLED,
+            DEFAULT_READINESS_CHECK_ENABLED_DEFAULT,
+            componentSpec.getConfiguration(), scheduler.getConfig())) {
+      probe = MonitorUtils.getProbe(componentSpec.getReadinessCheck());
+    }
+    maxContainerFailurePerComp = YarnServiceConf.getInt(
+        CONTAINER_FAILURE_THRESHOLD, DEFAULT_CONTAINER_FAILURE_THRESHOLD,
+        componentSpec.getConfiguration(), scheduler.getConfig());
     createNumCompInstances(component.getNumberOfContainers());
     setDesiredContainers(component.getNumberOfContainers().intValue());
   }

+ 20 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java

@@ -20,7 +20,9 @@ package org.apache.hadoop.yarn.service.component.instance;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.registry.client.api.RegistryConstants;
 import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
+import org.apache.hadoop.registry.client.binding.RegistryUtils;
 import org.apache.hadoop.registry.client.types.ServiceRecord;
 import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
 import org.apache.hadoop.util.ExitUtil;
@@ -520,6 +522,24 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
     }
   }
 
+  public String getHostname() {
+    String domain = getComponent().getScheduler().getConfig()
+        .get(RegistryConstants.KEY_DNS_DOMAIN);
+    String hostname;
+    if (domain == null || domain.isEmpty()) {
+      hostname = MessageFormat
+          .format("{0}.{1}.{2}", getCompInstanceName(),
+              getComponent().getContext().service.getName(),
+              RegistryUtils.currentUser());
+    } else {
+      hostname = MessageFormat
+          .format("{0}.{1}.{2}.{3}", getCompInstanceName(),
+              getComponent().getContext().service.getName(),
+              RegistryUtils.currentUser(), domain);
+    }
+    return hostname;
+  }
+
   @Override
   public int compareTo(ComponentInstance to) {
     long delta = containerStartedTime - to.containerStartedTime;

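getHostname centralizes the Registry DNS naming scheme that DockerProviderService previously computed inline: instance.service.user, with the DNS domain appended when one is configured. A tiny illustration; all four values below are hypothetical stand-ins for getCompInstanceName(), the service name, the current user, and KEY_DNS_DOMAIN:

```java
import java.text.MessageFormat;

public final class HostnameFormatExample {
  public static void main(String[] args) {
    String withDomain = MessageFormat.format("{0}.{1}.{2}.{3}",
        "jenkins-0", "jenkins", "hadoop", "example.com");
    String withoutDomain = MessageFormat.format("{0}.{1}.{2}",
        "jenkins-0", "jenkins", "hadoop");
    System.out.println(withDomain);    // jenkins-0.jenkins.hadoop.example.com
    System.out.println(withoutDomain); // jenkins-0.jenkins.hadoop
  }
}
```
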
+ 27 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java

@@ -20,27 +20,34 @@ package org.apache.hadoop.yarn.service.conf;
 
 import org.apache.hadoop.yarn.service.api.records.Configuration;
 
+// ALL SERVICE AM PROPERTIES ADDED TO THIS FILE MUST BE DOCUMENTED
+// in the yarn site yarn-service/Configurations.md file.
 public class YarnServiceConf {
 
   private static final String YARN_SERVICE_PREFIX = "yarn.service.";
 
   // Retry settings for the ServiceClient to talk to Service AppMaster
   public static final String CLIENT_AM_RETRY_MAX_WAIT_MS = "yarn.service.client-am.retry.max-wait-ms";
+  public static final long DEFAULT_CLIENT_AM_RETRY_MAX_WAIT_MS = 15 * 60 * 1000;
   public static final String CLIENT_AM_RETRY_MAX_INTERVAL_MS = "yarn.service.client-am.retry-interval-ms";
+  public static final long DEFAULT_CLIENT_AM_RETRY_MAX_INTERVAL_MS = 2 * 1000;
 
   // Retry settings for container failures
   public static final String CONTAINER_RETRY_MAX = "yarn.service.container-failure.retry.max";
+  public static final int DEFAULT_CONTAINER_RETRY_MAX = -1;
   public static final String CONTAINER_RETRY_INTERVAL = "yarn.service.container-failure.retry-interval-ms";
+  public static final int DEFAULT_CONTAINER_RETRY_INTERVAL = 30000;
   public static final String CONTAINER_FAILURES_VALIDITY_INTERVAL =
       "yarn.service.container-failure.validity-interval-ms";
+  public static final long DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL = -1;
 
   public static final String AM_RESTART_MAX = "yarn.service.am-restart.max-attempts";
+  public static final int DEFAULT_AM_RESTART_MAX = 20;
   public static final String AM_RESOURCE_MEM = "yarn.service.am-resource.memory";
   public static final long DEFAULT_KEY_AM_RESOURCE_MEM = 1024;
 
-  public static final String DEFAULT_AM_JVM_XMX = " -Xmx768m ";
-
   public static final String YARN_QUEUE = "yarn.service.queue";
+  public static final String DEFAULT_YARN_QUEUE = "default";
 
 
   public static final String API_SERVER_ADDRESS = "yarn.service.api-server.address";
   public static final String API_SERVER_ADDRESS = "yarn.service.api-server.address";
   public static final String DEFAULT_API_SERVER_ADDRESS = "0.0.0.0:";
   public static final String DEFAULT_API_SERVER_ADDRESS = "0.0.0.0:";
@@ -67,11 +74,14 @@ public class YarnServiceConf {
    */
   public static final String CONTAINER_FAILURE_THRESHOLD =
       "yarn.service.container-failure-per-component.threshold";
+  public static final int DEFAULT_CONTAINER_FAILURE_THRESHOLD = 10;
+
   /**
    * Maximum number of container failures on a node before the node is blacklisted
    */
   public static final String NODE_BLACKLIST_THRESHOLD =
       "yarn.service.node-blacklist.threshold";
+  public static final int DEFAULT_NODE_BLACKLIST_THRESHOLD = 3;
 
   /**
    * The failure count for CONTAINER_FAILURE_THRESHOLD and NODE_BLACKLIST_THRESHOLD
@@ -79,6 +89,7 @@ public class YarnServiceConf {
    */
   public static final String CONTAINER_FAILURE_WINDOW =
       "yarn.service.failure-count-reset.window";
+  public static final long DEFAULT_CONTAINER_FAILURE_WINDOW = 21600;
 
   /**
    * interval between readiness checks.
@@ -86,10 +97,18 @@ public class YarnServiceConf {
   public static final String READINESS_CHECK_INTERVAL = "yarn.service.readiness-check-interval.seconds";
   public static final int DEFAULT_READINESS_CHECK_INTERVAL = 30; // seconds
 
+  /**
+   * Default readiness check enabled.
+   */
+  public static final String DEFAULT_READINESS_CHECK_ENABLED =
+      "yarn.service.default-readiness-check.enabled";
+  public static final boolean DEFAULT_READINESS_CHECK_ENABLED_DEFAULT = true;
+
   /**
    * JVM opts.
    */
   public static final String JVM_OPTS = "yarn.service.am.java.opts";
+  public static final String DEFAULT_AM_JVM_XMX = " -Xmx768m ";
 
   /**
    * How long to wait until a container is considered dead.
@@ -126,6 +145,12 @@ public class YarnServiceConf {
     return userConf.getPropertyInt(name, systemConf.getInt(name, defaultValue));
   }
 
+  public static boolean getBoolean(String name, boolean defaultValue,
+      Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) {
+    return userConf.getPropertyBool(name, systemConf.getBoolean(name,
+        defaultValue));
+  }
+
   public static String get(String name, String defaultVal,
       Configuration userConf, org.apache.hadoop.conf.Configuration systemConf) {
     return userConf.getProperty(name, systemConf.get(name, defaultVal));

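All of these getters share one resolution order: the per-service (or per-component) property from the YarnFile wins, then the cluster configuration, then the hard-coded default. A hedged sketch of how an AM-side caller would resolve the new flag; the wrapper class and parameter names are assumptions:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.service.api.records.Service;
import org.apache.hadoop.yarn.service.conf.YarnServiceConf;

public final class ConfResolutionExample {
  // YarnFile property -> cluster conf (yarn-site.xml) -> hard default.
  static boolean defaultReadinessCheckEnabled(Service spec,
      Configuration clusterConf) {
    return YarnServiceConf.getBoolean(
        YarnServiceConf.DEFAULT_READINESS_CHECK_ENABLED,
        YarnServiceConf.DEFAULT_READINESS_CHECK_ENABLED_DEFAULT,
        spec.getConfiguration(), clusterConf);
  }
}
```
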
+ 6 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/ServiceMonitor.java

@@ -43,6 +43,7 @@ import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanc
 import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceEventType.BECOME_READY;
 import static org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState.READY;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURE_WINDOW;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_CONTAINER_FAILURE_WINDOW;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_READINESS_CHECK_INTERVAL;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.READINESS_CHECK_INTERVAL;
 
@@ -81,7 +82,7 @@ public class ServiceMonitor extends AbstractService {
 
     // Default 6 hours.
     long failureResetInterval = YarnServiceConf
-        .getLong(CONTAINER_FAILURE_WINDOW, 21600,
+        .getLong(CONTAINER_FAILURE_WINDOW, DEFAULT_CONTAINER_FAILURE_WINDOW,
             context.service.getConfiguration(), conf);
 
     executorService
@@ -109,11 +110,15 @@ public class ServiceMonitor extends AbstractService {
         ProbeStatus status = instance.ping();
         if (status.isSuccess()) {
           if (instance.getState() == STARTED) {
+            LOG.info("Readiness check succeeded for {}: {}", instance
+                .getCompInstanceName(), status);
             // synchronously update the state.
             instance.handle(
                 new ComponentInstanceEvent(entry.getKey(), BECOME_READY));
           }
         } else {
+          LOG.info("Readiness check failed for {}: {}", instance
+              .getCompInstanceName(), status);
           if (instance.getState() == READY) {
             instance.handle(
                 new ComponentInstanceEvent(entry.getKey(), BECOME_NOT_READY));

+ 99 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/DefaultProbe.java

@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.monitor.probe;
+
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.apache.hadoop.yarn.service.utils.ServiceRegistryUtils;
+import org.apache.hadoop.yarn.service.utils.ServiceUtils;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * A probe that checks whether the AM has retrieved an IP for a container.
+ * Optional parameters enable a subsequent check for whether a DNS lookup can
+ * be performed for the container's hostname. Configurable properties include:
+ *
+ *   dns.check.enabled - true if DNS check should be performed (default false)
+ *   dns.address - optional IP:port address of DNS server to use for DNS check
+ */
+public class DefaultProbe extends Probe {
+  private final boolean dnsCheckEnabled;
+  private final String dnsAddress;
+
+  public DefaultProbe(Map<String, String> props) {
+    this("Default probe: IP presence", props);
+  }
+
+  protected DefaultProbe(String name, Map<String, String> props) {
+    this.dnsCheckEnabled = getPropertyBool(props,
+        DEFAULT_PROBE_DNS_CHECK_ENABLED,
+        DEFAULT_PROBE_DNS_CHECK_ENABLED_DEFAULT);
+    this.dnsAddress = props.get(DEFAULT_PROBE_DNS_ADDRESS);
+    String additionalName = "";
+    if (dnsCheckEnabled) {
+      if (dnsAddress == null) {
+        additionalName = " with DNS checking";
+      } else {
+        additionalName =  " with DNS checking and DNS server address " +
+            dnsAddress;
+      }
+    }
+    setName(name + additionalName);
+  }
+
+  public static DefaultProbe create() throws IOException {
+    return new DefaultProbe(Collections.emptyMap());
+  }
+
+  public static DefaultProbe create(Map<String, String> props) throws
+      IOException {
+    return new DefaultProbe(props);
+  }
+
+  @Override
+  public ProbeStatus ping(ComponentInstance instance) {
+    ProbeStatus status = new ProbeStatus();
+
+    ContainerStatus containerStatus = instance.getContainerStatus();
+    if (containerStatus == null || ServiceUtils.isEmpty(containerStatus
+        .getIPs())) {
+      status.fail(this, new IOException(
+          instance.getCompInstanceName() + ": IP is not available yet"));
+      return status;
+    }
+
+    String hostname = instance.getHostname();
+    if (dnsCheckEnabled && !ServiceRegistryUtils.registryDNSLookupExists(
+        dnsAddress, hostname)) {
+      status.fail(this, new IOException(
+          instance.getCompInstanceName() + ": DNS checking is enabled, but " +
+              "lookup for " + hostname + " is not available yet"));
+      return status;
+    }
+
+    status.succeed(this);
+    return status;
+  }
+
+  protected boolean isDnsCheckEnabled() {
+    return dnsCheckEnabled;
+  }
+}

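With no readiness check configured, every component now gets this probe for free, and the optional DNS check is switched on through the readiness check properties documented in the javadoc above. A sketch; the class name and the Registry DNS address are hypothetical:

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.service.monitor.probe.DefaultProbe;
import org.apache.hadoop.yarn.service.monitor.probe.Probe;

public final class DefaultProbeExample {
  public static Probe dnsCheckingProbe() throws IOException {
    Map<String, String> props = new HashMap<>();
    props.put("dns.check.enabled", "true");
    // Hypothetical Registry DNS endpoint; omit to use the system resolver.
    props.put("dns.address", "10.0.0.1:53");
    // ping() now fails until the AM has an IP for the container AND the
    // container hostname resolves.
    return DefaultProbe.create(props);
  }
}
```
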
+ 21 - 16
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/HttpProbe.java

@@ -17,11 +17,7 @@
 
 package org.apache.hadoop.yarn.service.monitor.probe;
 
-import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
-import org.apache.hadoop.yarn.service.utils.ServiceUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -30,7 +26,20 @@ import java.net.HttpURLConnection;
 import java.net.URL;
 import java.util.Map;
 
-public class HttpProbe extends Probe {
+/**
+ * A probe that checks whether a successful HTTP response code can be obtained
+ * from a container. A well-formed URL must be provided. The URL is intended
+ * to contain a token ${THIS_HOST} that will be replaced by the IP of the
+ * container. This probe also performs the checks of the {@link DefaultProbe}.
+ * Additional configurable properties include:
+ *
+ *   url - required URL for HTTP connection, e.g. http://${THIS_HOST}:8080
+ *   timeout - connection timeout (default 1000)
+ *   min.success - minimum response code considered successful (default 200)
+ *   max.success - maximum response code considered successful (default 299)
+ *
+ */
+public class HttpProbe extends DefaultProbe {
   protected static final Logger log = LoggerFactory.getLogger(HttpProbe.class);
 
   private static final String HOST_TOKEN = "${THIS_HOST}";
@@ -40,9 +49,9 @@ public class HttpProbe extends Probe {
   private final int min, max;
 
 
-  public HttpProbe(String url, int timeout, int min, int max, Configuration
-      conf) {
-    super("Http probe of " + url + " [" + min + "-" + max + "]", conf);
+  public HttpProbe(String url, int timeout, int min, int max,
+      Map<String, String> props) {
+    super("Http probe of " + url + " [" + min + "-" + max + "]", props);
     this.urlString = url;
     this.timeout = timeout;
     this.min = min;
@@ -59,7 +68,7 @@ public class HttpProbe extends Probe {
         WEB_PROBE_MIN_SUCCESS_DEFAULT);
     int maxSuccess = getPropertyInt(props, WEB_PROBE_MAX_SUCCESS,
         WEB_PROBE_MAX_SUCCESS_DEFAULT);
-    return new HttpProbe(urlString, timeout, minSuccess, maxSuccess, null);
+    return new HttpProbe(urlString, timeout, minSuccess, maxSuccess, props);
   }
 
 
@@ -73,15 +82,11 @@
 
   @Override
   public ProbeStatus ping(ComponentInstance instance) {
-    ProbeStatus status = new ProbeStatus();
-    ContainerStatus containerStatus = instance.getContainerStatus();
-    if (containerStatus == null || ServiceUtils.isEmpty(containerStatus.getIPs())
-        || StringUtils.isEmpty(containerStatus.getHost())) {
-      status.fail(this, new IOException("IP is not available yet"));
+    ProbeStatus status = super.ping(instance);
+    if (!status.isSuccess()) {
       return status;
     }
-
-    String ip = containerStatus.getIPs().get(0);
+    String ip = instance.getContainerStatus().getIPs().get(0);
     HttpURLConnection connection = null;
     try {
       URL url = new URL(urlString.replace(HOST_TOKEN, ip));

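HttpProbe keeps its URL templating: ${THIS_HOST} is substituted with the container's first reported IP before the connection attempt. A property map mirroring the javadoc above; the class name and port are hypothetical, and the throws clause is a defensive assumption:

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.service.monitor.probe.HttpProbe;
import org.apache.hadoop.yarn.service.monitor.probe.Probe;

public final class HttpProbeExample {
  public static Probe webProbe() throws IOException {
    Map<String, String> props = new HashMap<>();
    props.put("url", "http://${THIS_HOST}:8080"); // token -> container IP
    props.put("timeout", "1000");                 // connect timeout, ms
    props.put("min.success", "200");
    props.put("max.success", "299");
    return HttpProbe.create(props);
  }
}
```
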
+ 12 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorKeys.java

@@ -22,6 +22,18 @@ package org.apache.hadoop.yarn.service.monitor.probe;
  */
 public interface MonitorKeys {
 
+  /**
+   * Default probing key : DNS check enabled {@value}.
+   */
+  String DEFAULT_PROBE_DNS_CHECK_ENABLED = "dns.check.enabled";
+  /**
+   * Default probing default : DNS check enabled {@value}.
+   */
+  boolean DEFAULT_PROBE_DNS_CHECK_ENABLED_DEFAULT = false;
+  /**
+   * Default probing key : DNS checking address IP:port {@value}.
+   */
+  String DEFAULT_PROBE_DNS_ADDRESS = "dns.address";
   /**
    * Port probing key : port to attempt to create a TCP connection to {@value}.
    */

+ 7 - 7
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/MonitorUtils.java

@@ -61,20 +61,20 @@ public final class MonitorUtils {
   }
 
   public static Probe getProbe(ReadinessCheck readinessCheck) {
-    if (readinessCheck == null) {
-      return null;
-    }
-    if (readinessCheck.getType() == null) {
-      return null;
-    }
     try {
+      if (readinessCheck == null) {
+        return DefaultProbe.create();
+      }
+      if (readinessCheck.getType() == null) {
+        return DefaultProbe.create(readinessCheck.getProperties());
+      }
       switch (readinessCheck.getType()) {
       case HTTP:
         return HttpProbe.create(readinessCheck.getProperties());
       case PORT:
         return PortProbe.create(readinessCheck.getProperties());
       default:
-        return null;
+        return DefaultProbe.create(readinessCheck.getProperties());
       }
     } catch (Throwable t) {
       throw new IllegalArgumentException("Error creating readiness check " +

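The net effect of the getProbe changes: a null or type-less readiness check no longer disables monitoring, it falls back to the DefaultProbe. A rough sketch of the new contract; the class name is hypothetical:

```java
import org.apache.hadoop.yarn.service.monitor.probe.MonitorUtils;
import org.apache.hadoop.yarn.service.monitor.probe.Probe;

public final class FallbackProbeExample {
  public static void main(String[] args) {
    // Before this commit getProbe(null) returned null and the component was
    // never monitored; now it yields a DefaultProbe (IP-presence check).
    Probe probe = MonitorUtils.getProbe(null);
    System.out.println(probe.getClass().getSimpleName()); // DefaultProbe
  }
}
```
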
+ 12 - 12
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/PortProbe.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.service.monitor.probe;
 
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
-import org.apache.hadoop.yarn.service.utils.ServiceUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -29,15 +28,20 @@ import java.net.Socket;
 import java.util.Map;
 
 /**
- * Probe for a port being open.
+ * A probe that checks whether a container has a specified port open. This
+ * probe also performs the checks of the {@link DefaultProbe}. Additional
+ * configurable properties include:
+ *
+ *   port - required port for socket connection
+ *   timeout - connection timeout (default 1000)
  */
-public class PortProbe extends Probe {
+public class PortProbe extends DefaultProbe {
   protected static final Logger log = LoggerFactory.getLogger(PortProbe.class);
   private final int port;
   private final int timeout;
 
-  public PortProbe(int port, int timeout) {
-    super("Port probe of " + port + " for " + timeout + "ms", null);
+  public PortProbe(int port, int timeout, Map<String, String> props) {
+    super("Port probe of " + port + " for " + timeout + "ms", props);
     this.port = port;
     this.timeout = timeout;
   }
@@ -54,7 +58,7 @@ public class PortProbe extends Probe {
     int timeout = getPropertyInt(props, PORT_PROBE_CONNECT_TIMEOUT,
         PORT_PROBE_CONNECT_TIMEOUT_DEFAULT);
 
-    return new PortProbe(port, timeout);
+    return new PortProbe(port, timeout, props);
   }
 
   /**
@@ -65,12 +69,8 @@
    */
   @Override
   public ProbeStatus ping(ComponentInstance instance) {
-    ProbeStatus status = new ProbeStatus();
-
-    if (instance.getContainerStatus() == null || ServiceUtils
-        .isEmpty(instance.getContainerStatus().getIPs())) {
-      status.fail(this, new IOException(
-          instance.getCompInstanceName() + ": IP is not available yet"));
+    ProbeStatus status = super.ping(instance);
+    if (!status.isSuccess()) {
       return status;
     }
 

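PortProbe follows the same pattern; its two keys per the javadoc above. The class name below is hypothetical and the throws clause a defensive assumption:

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.yarn.service.monitor.probe.PortProbe;
import org.apache.hadoop.yarn.service.monitor.probe.Probe;

public final class PortProbeExample {
  public static Probe portProbe() throws IOException {
    Map<String, String> props = new HashMap<>();
    props.put("port", "8080");    // required: TCP port on the container IP
    props.put("timeout", "1000"); // optional: connect timeout, ms
    return PortProbe.create(props);
  }
}
```
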
+ 13 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/monitor/probe/Probe.java

@@ -18,7 +18,6 @@
 package org.apache.hadoop.yarn.service.monitor.probe;
 
 import org.apache.commons.lang.StringUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 
 import java.io.IOException;
@@ -29,18 +28,18 @@ import java.util.Map;
  */
 public abstract class Probe implements MonitorKeys {
 
-  protected final Configuration conf;
   private String name;
 
+  protected Probe() {
+  }
+
   /**
    * Create a probe of a specific name
    *
    * @param name probe name
-   * @param conf configuration being stored.
    */
-  public Probe(String name, Configuration conf) {
+  public Probe(String name) {
     this.name = name;
-    this.conf = conf;
   }
 
 
@@ -82,6 +81,15 @@ public abstract class Probe implements MonitorKeys {
     return Integer.parseInt(value);
   }
 
+  public static boolean getPropertyBool(Map<String, String> props, String name,
+      boolean defaultValue) {
+    String value = props.get(name);
+    if (StringUtils.isEmpty(value)) {
+      return defaultValue;
+    }
+    return Boolean.parseBoolean(value);
+  }
+
   /**
    * perform any prelaunch initialization
    */

+ 11 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/AbstractProviderService.java

@@ -42,6 +42,9 @@ import java.util.Map.Entry;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_FAILURES_VALIDITY_INTERVAL;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_INTERVAL;
 import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.CONTAINER_RETRY_MAX;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_CONTAINER_RETRY_INTERVAL;
+import static org.apache.hadoop.yarn.service.conf.YarnServiceConf.DEFAULT_CONTAINER_RETRY_MAX;
 import static org.apache.hadoop.yarn.service.utils.ServiceApiUtil.$;
 
 public abstract class AbstractProviderService implements ProviderService,
@@ -106,12 +109,14 @@ public abstract class AbstractProviderService implements ProviderService,
     }
 
     // By default retry forever every 30 seconds
-    launcher.setRetryContext(YarnServiceConf
-        .getInt(CONTAINER_RETRY_MAX, -1, service.getConfiguration(),
-            yarnConf), YarnServiceConf
-        .getInt(CONTAINER_RETRY_INTERVAL, 30000, service.getConfiguration(),
+    launcher.setRetryContext(
+        YarnServiceConf.getInt(CONTAINER_RETRY_MAX, DEFAULT_CONTAINER_RETRY_MAX,
+            component.getConfiguration(), yarnConf),
+        YarnServiceConf.getInt(CONTAINER_RETRY_INTERVAL,
+            DEFAULT_CONTAINER_RETRY_INTERVAL, component.getConfiguration(),
             yarnConf),
-        YarnServiceConf.getLong(CONTAINER_FAILURES_VALIDITY_INTERVAL, -1,
-            service.getConfiguration(), yarnConf));
+        YarnServiceConf.getLong(CONTAINER_FAILURES_VALIDITY_INTERVAL,
+            DEFAULT_CONTAINER_FAILURES_VALIDITY_INTERVAL,
+            component.getConfiguration(), yarnConf));
   }
 }

+ 1 - 16
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/docker/DockerProviderService.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.yarn.service.provider.docker;
 
-import org.apache.hadoop.registry.client.api.RegistryConstants;
-import org.apache.hadoop.registry.client.binding.RegistryUtils;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.provider.AbstractProviderService;
 import org.apache.hadoop.yarn.service.api.records.Service;
@@ -26,7 +24,6 @@ import org.apache.hadoop.yarn.service.utils.SliderFileSystem;
 import org.apache.hadoop.yarn.service.containerlaunch.AbstractLauncher;
 
 import java.io.IOException;
-import java.text.MessageFormat;
 
 public class DockerProviderService extends AbstractProviderService
     implements DockerKeys {
@@ -38,19 +35,7 @@ public class DockerProviderService extends AbstractProviderService
     launcher.setDockerImage(compInstance.getCompSpec().getArtifact().getId());
     launcher.setDockerNetwork(compInstance.getCompSpec().getConfiguration()
         .getProperty(DOCKER_NETWORK));
-    String domain = compInstance.getComponent().getScheduler().getConfig()
-        .get(RegistryConstants.KEY_DNS_DOMAIN);
-    String hostname;
-    if (domain == null || domain.isEmpty()) {
-      hostname = MessageFormat
-          .format("{0}.{1}.{2}", compInstance.getCompInstanceName(),
-              service.getName(), RegistryUtils.currentUser());
-    } else {
-      hostname = MessageFormat
-          .format("{0}.{1}.{2}.{3}", compInstance.getCompInstanceName(),
-              service.getName(), RegistryUtils.currentUser(), domain);
-    }
-    launcher.setDockerHostname(hostname);
+    launcher.setDockerHostname(compInstance.getHostname());
     launcher.setRunPrivilegedContainer(
         compInstance.getCompSpec().getRunPrivilegedContainer());
   }

+ 21 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java

@@ -116,6 +116,13 @@ public class ServiceApiUtil {
       }
     }
 
+    // Validate the Docker client config.
+    try {
+      validateDockerClientConfiguration(service, conf);
+    } catch (IOException e) {
+      throw new IllegalArgumentException(e);
+    }
+
     // Validate there are no component name collisions (collisions are not
     // currently supported) and add any components from external services
     Configuration globalConf = service.getConfiguration();
@@ -214,6 +221,20 @@ public class ServiceApiUtil {
     }
   }
 
+  private static void validateDockerClientConfiguration(Service service,
+      org.apache.hadoop.conf.Configuration conf) throws IOException {
+    String dockerClientConfig = service.getDockerClientConfig();
+    if (!StringUtils.isEmpty(dockerClientConfig)) {
+      Path dockerClientConfigPath = new Path(dockerClientConfig);
+      FileSystem fs = dockerClientConfigPath.getFileSystem(conf);
+      if (!fs.exists(dockerClientConfigPath)) {
+        throw new IOException(
+            "The supplied Docker client config does not exist: "
+                + dockerClientConfig);
+      }
+    }
+  }
+
   private static void validateComponent(Component comp, FileSystem fs,
       org.apache.hadoop.conf.Configuration conf)
       throws IOException {

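Note the existence check goes through org.apache.hadoop.fs.Path rather than java.io.File, so the Docker client config may live on any Hadoop-supported filesystem and the URI scheme picks the implementation. A small illustration; the class name and both paths are hypothetical:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public final class ConfigPathCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Both resolve through the same API; the scheme selects local vs. HDFS.
    Path local = new Path("file:///etc/docker/config.json");
    Path remote = new Path("hdfs:///user/jenkins/config.json");
    System.out.println(local.getFileSystem(conf).exists(local));
    System.out.println(remote.getFileSystem(conf).exists(remote));
  }
}
```
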
+ 60 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceRegistryUtils.java

@@ -20,9 +20,23 @@ package org.apache.hadoop.yarn.service.utils;
 
 import org.apache.hadoop.registry.client.binding.RegistryUtils;
 import org.apache.hadoop.yarn.service.conf.YarnServiceConstants;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.naming.Context;
+import javax.naming.NameNotFoundException;
+import javax.naming.NamingException;
+import javax.naming.directory.Attributes;
+import javax.naming.directory.DirContext;
+import javax.naming.directory.InitialDirContext;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Hashtable;
 
 
 public class ServiceRegistryUtils {
+  private static final Logger LOG =
+      LoggerFactory.getLogger(ServiceRegistryUtils.class);
 
   public static final String SVC_USERS = "/services/yarn/users";
 
@@ -53,4 +67,50 @@ public class ServiceRegistryUtils {
   public static String mkUserHomePath(String username) {
     return SVC_USERS + "/" + username;
   }
+
+  /**
+   * Determine whether a DNS lookup exists for a given name. If a DNS server
+   * address is provided, the lookup will be performed against this DNS
+   * server. This option is provided because it may be desirable to perform
+   * the lookup against Registry DNS directly to avoid caching of negative
+   * responses that may be performed by other DNS servers, thereby allowing the
+   * lookup to succeed sooner.
+   *
+   * @param addr host:port dns address, or null
+   * @param name name to look up
+   * @return true if a lookup succeeds for the specified name
+   */
+  public static boolean registryDNSLookupExists(String addr, String
+      name) {
+    if (addr == null) {
+      try {
+        InetAddress.getByName(name);
+        return true;
+      } catch (UnknownHostException e) {
+        return false;
+      }
+    }
+
+    String dnsURI = String.format("dns://%s", addr);
+    Hashtable<String, Object> env = new Hashtable<>();
+    env.put(Context.INITIAL_CONTEXT_FACTORY,
+        "com.sun.jndi.dns.DnsContextFactory");
+    env.put(Context.PROVIDER_URL, dnsURI);
+
+    try {
+      DirContext ictx = new InitialDirContext(env);
+      Attributes attrs = ictx.getAttributes(name, new String[]{"A"});
+
+      if (attrs.size() > 0) {
+        return true;
+      }
+    } catch (NameNotFoundException e) {
+      // this doesn't need to be logged
+    } catch (NamingException e) {
+      LOG.error("Got exception when performing DNS lookup", e);
+    }
+
+    return false;
+  }
+
 }

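A hedged usage sketch of the new helper: pointing it at Registry DNS avoids negative-response caching in intermediate resolvers, while a null address falls back to the JVM's default resolver. Host, port, and name below are illustrative:

```java
// Ask a specific DNS server (host:port form) for an A record.
boolean viaRegistryDNS = ServiceRegistryUtils.registryDNSLookupExists(
    "127.0.0.1:5353", "comp-0.my-service.user.example.com");

// With a null address the helper calls InetAddress.getByName, which goes
// through the system resolver and its caches.
boolean viaSystemResolver = ServiceRegistryUtils.registryDNSLookupExists(
    null, "comp-0.my-service.user.example.com");
```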
+ 25 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/MockServiceAM.java

@@ -21,11 +21,13 @@ package org.apache.hadoop.yarn.service;
 import com.google.common.base.Supplier;
 import com.google.common.collect.Lists;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.DataOutputBuffer;
 import org.apache.hadoop.registry.client.api.RegistryOperations;
 import org.apache.hadoop.registry.client.binding.RegistryPathUtils;
 import org.apache.hadoop.registry.client.types.ServiceRecord;
 import org.apache.hadoop.registry.client.types.yarn.PersistencePolicies;
 import org.apache.hadoop.registry.client.types.yarn.YarnRegistryAttributes;
+import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
@@ -60,6 +62,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.Collections;
 import java.util.Iterator;
 import java.util.LinkedList;
@@ -96,11 +99,19 @@ public class MockServiceAM extends ServiceMaster {
   private Map<ContainerId, ContainerStatus> containerStatuses =
       new ConcurrentHashMap<>();
 
+  private Credentials amCreds;
+
   public MockServiceAM(Service service) {
     super(service.getName());
     this.service = service;
   }
 
+  public MockServiceAM(Service service, Credentials amCreds) {
+    super(service.getName());
+    this.service = service;
+    this.amCreds = amCreds;
+  }
+
   @Override
   protected ContainerId getAMContainerId()
       throws BadClusterStateException {
@@ -385,4 +396,18 @@ public class MockServiceAM extends ServiceMaster {
     containerStatuses.put(container.getId(), status);
   }
 
+  @Override
+  protected ByteBuffer recordTokensForContainers()
+      throws IOException {
+    DataOutputBuffer dob = new DataOutputBuffer();
+    if (amCreds == null) {
+      return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+    }
+    try {
+      amCreds.writeTokenStorageToStream(dob);
+    } finally {
+      dob.close();
+    }
+    return ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
+  }
 }

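The override serializes the injected Credentials into Hadoop's token-storage wire format, the same shape the real AM hands to container launch contexts. A minimal sketch of that serialization in isolation, using only calls visible in this diff:

```java
// Serialize Credentials the way recordTokensForContainers does above.
Credentials creds = new Credentials();
DataOutputBuffer dob = new DataOutputBuffer();
try {
  creds.writeTokenStorageToStream(dob);  // token-storage wire format
} finally {
  dob.close();
}
ByteBuffer tokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
// The TestServiceAM change below parses such a buffer back with
// DockerClientConfigHandler.getCredentialsFromTokensByteBuffer(tokens).
```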
+ 50 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestServiceAM.java

@@ -21,6 +21,10 @@ package org.apache.hadoop.yarn.service;
 import com.google.common.collect.ImmutableMap;
 import org.apache.commons.io.FileUtils;
 import org.apache.curator.test.TestingCluster;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -29,6 +33,7 @@ import org.apache.hadoop.yarn.api.records.ResourceTypeInfo;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
 import org.apache.hadoop.yarn.client.api.async.AMRMClientAsync;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.security.DockerCredentialTokenIdentifier;
 import org.apache.hadoop.yarn.service.api.records.Component;
 import org.apache.hadoop.yarn.service.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.service.api.records.Service;
@@ -36,6 +41,7 @@ import org.apache.hadoop.yarn.service.component.ComponentState;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
 import org.apache.hadoop.yarn.service.component.instance.ComponentInstanceState;
 import org.apache.hadoop.yarn.service.conf.YarnServiceConf;
+import org.apache.hadoop.yarn.util.DockerClientConfigHandler;
 import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -44,14 +50,18 @@ import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.BufferedWriter;
 import java.io.File;
+import java.io.FileWriter;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 
 import static org.apache.hadoop.registry.client.api.RegistryConstants.KEY_REGISTRY_ZK_QUORUM;
+import static org.junit.Assert.assertEquals;
 
 public class TestServiceAM extends ServiceTestUtils{
 
@@ -294,4 +304,44 @@ public class TestServiceAM extends ServiceTestUtils{
 
     am.stop();
   }
+
+  @Test
+  public void testRecordTokensForContainers() throws Exception {
+    ApplicationId applicationId = ApplicationId.newInstance(123456, 1);
+    Service exampleApp = new Service();
+    exampleApp.setId(applicationId.toString());
+    exampleApp.setName("testContainerCompleted");
+    exampleApp.addComponent(createComponent("compa", 1, "pwd"));
+
+    String json = "{\"auths\": "
+        + "{\"https://index.docker.io/v1/\": "
+        + "{\"auth\": \"foobarbaz\"},"
+        + "\"registry.example.com\": "
+        + "{\"auth\": \"bazbarfoo\"}}}";
+    File dockerTmpDir = new File("target", "docker-tmp");
+    FileUtils.deleteQuietly(dockerTmpDir);
+    dockerTmpDir.mkdirs();
+    String dockerConfig = dockerTmpDir + "/config.json";
+    BufferedWriter bw = new BufferedWriter(new FileWriter(dockerConfig));
+    bw.write(json);
+    bw.close();
+    Credentials dockerCred =
+        DockerClientConfigHandler.readCredentialsFromConfigFile(
+            new Path(dockerConfig), conf, applicationId.toString());
+
+
+    MockServiceAM am = new MockServiceAM(exampleApp, dockerCred);
+    ByteBuffer amCredBuffer = am.recordTokensForContainers();
+    Credentials amCreds =
+        DockerClientConfigHandler.getCredentialsFromTokensByteBuffer(
+            amCredBuffer);
+
+    assertEquals(2, amCreds.numberOfTokens());
+    for (Token<? extends TokenIdentifier> tk : amCreds.getAllTokens()) {
+      Assert.assertTrue(
+          tk.getKind().equals(DockerCredentialTokenIdentifier.KIND));
+    }
+
+    am.stop();
+  }
 }

+ 0 - 156
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/TestSystemServiceManager.java

@@ -1,156 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.yarn.service;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.registry.client.api.RegistryOperations;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
-import org.apache.hadoop.yarn.service.api.records.Artifact;
-import org.apache.hadoop.yarn.service.api.records.ComponentState;
-import org.apache.hadoop.yarn.service.api.records.Service;
-import org.apache.hadoop.yarn.service.api.records.ServiceState;
-import org.apache.hadoop.yarn.service.exceptions.SliderException;
-import org.apache.hadoop.yarn.service.registry.YarnRegistryViewForProviders;
-import org.apache.hadoop.yarn.service.utils.ServiceApiUtil;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.Map;
-
-import static org.mockito.Mockito.mock;
-
-/**
- * Tests for {@link ServiceManager}.
- */
-public class TestSystemServiceManager {
-
-  @Rule
-  public ServiceTestUtils.ServiceFSWatcher rule =
-      new ServiceTestUtils.ServiceFSWatcher();
-
-  @Test
-  public void testUpgrade() throws IOException, SliderException {
-    ServiceManager serviceManager = createTestServiceManager("testUpgrade");
-    upgrade(serviceManager, "v2", false);
-    Assert.assertEquals("service not upgraded", ServiceState.UPGRADING,
-        serviceManager.getServiceSpec().getState());
-  }
-
-  @Test
-  public void testRestartNothingToUpgrade()
-      throws IOException, SliderException {
-    ServiceManager serviceManager = createTestServiceManager("testRestart");
-    upgrade(serviceManager, "v2", false);
-
-    //make components stable
-    serviceManager.getServiceSpec().getComponents().forEach(comp -> {
-      comp.setState(ComponentState.STABLE);
-    });
-    serviceManager.handle(new ServiceEvent(ServiceEventType.START));
-    Assert.assertEquals("service not re-started", ServiceState.STABLE,
-        serviceManager.getServiceSpec().getState());
-  }
-
-  @Test
-  public void testRestartWithPendingUpgrade()
-      throws IOException, SliderException {
-    ServiceManager serviceManager = createTestServiceManager("testRestart");
-    upgrade(serviceManager, "v2", true);
-    serviceManager.handle(new ServiceEvent(ServiceEventType.START));
-    Assert.assertEquals("service should still be upgrading",
-        ServiceState.UPGRADING, serviceManager.getServiceSpec().getState());
-  }
-
-
-  private void upgrade(ServiceManager service, String version,
-      boolean upgradeArtifact)
-      throws IOException, SliderException {
-    Service upgradedDef = ServiceTestUtils.createExampleApplication();
-    upgradedDef.setName(service.getName());
-    upgradedDef.setVersion(version);
-    if (upgradeArtifact) {
-      Artifact upgradedArtifact = createTestArtifact("2");
-      upgradedDef.getComponents().forEach(component -> {
-        component.setArtifact(upgradedArtifact);
-      });
-    }
-    writeUpgradedDef(upgradedDef);
-    ServiceEvent upgradeEvent = new ServiceEvent(ServiceEventType.UPGRADE);
-    upgradeEvent.setVersion("v2");
-    service.handle(upgradeEvent);
-  }
-
-  private ServiceManager createTestServiceManager(String name)
-      throws IOException {
-    ServiceContext context = new ServiceContext();
-    context.service = createBaseDef(name);
-    context.fs = rule.getFs();
-
-    context.scheduler = new ServiceScheduler(context) {
-      @Override
-      protected YarnRegistryViewForProviders createYarnRegistryOperations(
-          ServiceContext context, RegistryOperations registryClient) {
-        return mock(YarnRegistryViewForProviders.class);
-      }
-    };
-
-    context.scheduler.init(rule.getConf());
-
-    Map<String, org.apache.hadoop.yarn.service.component.Component>
-        componentState = context.scheduler.getAllComponents();
-    context.service.getComponents().forEach(component -> {
-      componentState.put(component.getName(),
-          new org.apache.hadoop.yarn.service.component.Component(component,
-              1L, context));
-    });
-    return new ServiceManager(context);
-  }
-
-  static Service createBaseDef(String name) {
-    ApplicationId applicationId = ApplicationId.newInstance(
-        System.currentTimeMillis(), 1);
-    Service serviceDef = ServiceTestUtils.createExampleApplication();
-    serviceDef.setId(applicationId.toString());
-    serviceDef.setName(name);
-    serviceDef.setState(ServiceState.STARTED);
-    Artifact artifact = createTestArtifact("1");
-
-    serviceDef.getComponents().forEach(component ->
-        component.setArtifact(artifact));
-    return serviceDef;
-  }
-
-  static Artifact createTestArtifact(String artifactId) {
-    Artifact artifact = new Artifact();
-    artifact.setId(artifactId);
-    artifact.setType(Artifact.TypeEnum.TARBALL);
-    return artifact;
-  }
-
-  private void writeUpgradedDef(Service upgradedDef)
-      throws IOException, SliderException {
-    Path upgradePath = rule.getFs().buildClusterUpgradeDirPath(
-        upgradedDef.getName(), upgradedDef.getVersion());
-    ServiceApiUtil.createDirAndPersistApp(rule.getFs(), upgradePath,
-        upgradedDef);
-  }
-
-}

+ 155 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/monitor/probe/TestDefaultProbe.java

@@ -0,0 +1,155 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.service.monitor.probe;
+
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.service.api.records.ReadinessCheck;
+import org.apache.hadoop.yarn.service.component.instance.ComponentInstance;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
+
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests for default probe.
+ */
+@RunWith(Parameterized.class)
+public class TestDefaultProbe {
+  private final DefaultProbe probe;
+
+  public TestDefaultProbe(Probe probe) {
+    this.probe = (DefaultProbe) probe;
+  }
+
+  @Parameterized.Parameters
+  public static Collection<Object[]> data() {
+    // test run 1: Default probe checks that container has an IP
+    Probe p1 = MonitorUtils.getProbe(null);
+
+    // test run 2: Default probe with DNS check for component instance hostname
+    ReadinessCheck rc2 = new ReadinessCheck()
+        .type(ReadinessCheck.TypeEnum.DEFAULT)
+        .properties(Collections.singletonMap(
+            MonitorKeys.DEFAULT_PROBE_DNS_CHECK_ENABLED, "true"));
+    Probe p2 = MonitorUtils.getProbe(rc2);
+
+    // test run 3: Default probe with DNS check using specific DNS server
+    Map<String, String> props = new HashMap<>();
+    props.put(MonitorKeys.DEFAULT_PROBE_DNS_CHECK_ENABLED, "true");
+    props.put(MonitorKeys.DEFAULT_PROBE_DNS_ADDRESS, "8.8.8.8");
+    ReadinessCheck rc3 = new ReadinessCheck()
+        .type(ReadinessCheck.TypeEnum.DEFAULT).properties(props);
+    Probe p3 = MonitorUtils.getProbe(rc3);
+
+    return Arrays.asList(new Object[][] {{p1}, {p2}, {p3}});
+  }
+
+  @Test
+  public void testDefaultProbe() {
+    // component instance has a good hostname, so probe will eventually succeed
+    // whether or not DNS checking is enabled
+    ComponentInstance componentInstance =
+        createMockComponentInstance("example.com");
+    checkPingResults(probe, componentInstance, false);
+
+    // component instance has a bad hostname, so probe will fail when DNS
+    // checking is enabled
+    componentInstance = createMockComponentInstance("bad.dns.test");
+    checkPingResults(probe, componentInstance, probe.isDnsCheckEnabled());
+  }
+
+  private static void checkPingResults(Probe probe, ComponentInstance
+      componentInstance, boolean expectDNSCheckFailure) {
+    // on the first ping, null container status results in failure
+    ProbeStatus probeStatus = probe.ping(componentInstance);
+    assertFalse("Expected failure for " + probeStatus.toString(),
+        probeStatus.isSuccess());
+    assertTrue("Expected IP failure for " + probeStatus.toString(),
+        probeStatus.toString().contains(
+        componentInstance.getCompInstanceName() + ": IP is not available yet"));
+
+    // on the second ping, container status is retrieved but there are no
+    // IPs, resulting in failure
+    probeStatus = probe.ping(componentInstance);
+    assertFalse("Expected failure for " + probeStatus.toString(),
+        probeStatus.isSuccess());
+    assertTrue("Expected IP failure for " + probeStatus.toString(),
+        probeStatus.toString().contains(componentInstance
+            .getCompInstanceName() + ": IP is not available yet"));
+
+    // on the third ping, IPs are retrieved and success depends on whether or
+    // not a DNS lookup can be performed for the component instance hostname
+    probeStatus = probe.ping(componentInstance);
+    if (expectDNSCheckFailure) {
+      assertFalse("Expected failure for " + probeStatus.toString(),
+          probeStatus.isSuccess());
+      assertTrue("Expected DNS failure for " + probeStatus.toString(),
+          probeStatus.toString().contains(componentInstance
+              .getCompInstanceName() + ": DNS checking is enabled, but lookup" +
+              " for " + componentInstance.getHostname() + " is not available " +
+              "yet"));
+    } else {
+      assertTrue("Expected success for " + probeStatus.toString(),
+          probeStatus.isSuccess());
+    }
+  }
+
+  private static ComponentInstance createMockComponentInstance(String
+      hostname) {
+    ComponentInstance componentInstance = mock(ComponentInstance.class);
+    when(componentInstance.getHostname()).thenReturn(hostname);
+    when(componentInstance.getCompInstanceName()).thenReturn("comp-0");
+    when(componentInstance.getContainerStatus())
+        .thenAnswer(new Answer<ContainerStatus>() {
+          private int count = 0;
+
+          @Override
+          public ContainerStatus answer(InvocationOnMock invocationOnMock) {
+            count++;
+            if (count == 1) {
+              // first call to getContainerStatus returns null
+              return null;
+            } else if (count == 2) {
+              // second call returns a ContainerStatus with no IPs
+              ContainerStatus containerStatus = mock(ContainerStatus.class);
+              when(containerStatus.getIPs()).thenReturn(null);
+              return containerStatus;
+            } else {
+              // third call returns a ContainerStatus with one IP
+              ContainerStatus containerStatus = mock(ContainerStatus.class);
+              when(containerStatus.getIPs())
+                  .thenReturn(Collections.singletonList("1.2.3.4"));
+              return containerStatus;
+            }
+          }
+        });
+    return componentInstance;
+  }
+}

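The parameterized test pins down a three-phase ping contract: fail while the container status is null, fail while no IPs are reported, then succeed unless the optional DNS check cannot resolve the instance hostname. A hedged sketch of that decision sequence; only registryDNSLookupExists is confirmed by this diff, the field names and ProbeStatus calls are assumptions:

```java
// Sketch of the ping sequence the assertions above exercise.
ProbeStatus ping(ComponentInstance instance) {
  ProbeStatus status = new ProbeStatus();
  ContainerStatus cs = instance.getContainerStatus();
  if (cs == null || cs.getIPs() == null || cs.getIPs().isEmpty()) {
    status.fail(this, new IOException(
        instance.getCompInstanceName() + ": IP is not available yet"));
    return status;
  }
  if (dnsCheckEnabled && !ServiceRegistryUtils.registryDNSLookupExists(
      dnsAddress, instance.getHostname())) {
    status.fail(this, new IOException(
        instance.getCompInstanceName() + ": DNS checking is enabled, but "
            + "lookup for " + instance.getHostname() + " is not available yet"));
    return status;
  }
  status.succeed(this);
  return status;
}
```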
+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java

@@ -626,7 +626,7 @@ public abstract class ProtocolHATestBase extends ClientBaseWithFixes {
           ApplicationReport.newInstance(appId, attemptId, "fakeUser",
               "fakeQueue", "fakeApplicationName", "localhost", 0, null,
               YarnApplicationState.FINISHED, "fake an application report", "",
-              1000L, 1200L, FinalApplicationStatus.FAILED, null, "", 50f,
+              1000L, 1000L, 1200L, FinalApplicationStatus.FAILED, null, "", 50f,
               "fakeApplicationType", null);
       return report;
     }

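The mechanical churn across the next several test files all comes from one signature change: ApplicationReport.newInstance (and BuilderUtils.newApplicationReport, whose hunk closes this section) gains a launchTime parameter between startTime and finishTime. A sketch with the positions labeled; values are illustrative:

```java
// Argument positions inferred from the BuilderUtils hunk below.
ApplicationReport report = ApplicationReport.newInstance(
    appId, attemptId, "user", "queue", "appname", "host", 124, null,
    YarnApplicationState.RUNNING, "diagnostics", "url",
    1000L /* startTime */, 1000L /* launchTime */, 1200L /* finishTime */,
    FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.5f, "YARN", null);
```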
+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java

@@ -332,7 +332,7 @@ public class TestAHSClient {
           ApplicationReport.newInstance(applicationId,
             ApplicationAttemptId.newInstance(applicationId, 1), "user",
             "queue", "appname", "host", 124, null,
-            YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+            YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN",
             null);
       List<ApplicationReport> applicationReports =
@@ -389,7 +389,7 @@ public class TestAHSClient {
           ApplicationReport.newInstance(applicationId2,
             ApplicationAttemptId.newInstance(applicationId2, 2), "user2",
             "queue2", "appname2", "host2", 125, null,
-            YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2,
+            YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2, 2,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f,
             "NON-YARN", null);
       applicationReports.add(newApplicationReport2);
@@ -399,7 +399,7 @@ public class TestAHSClient {
           ApplicationReport.newInstance(applicationId3,
             ApplicationAttemptId.newInstance(applicationId3, 3), "user3",
             "queue3", "appname3", "host3", 126, null,
-            YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
+            YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3, 3,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f,
             "MAPREDUCE", null);
       applicationReports.add(newApplicationReport3);
@@ -409,7 +409,7 @@ public class TestAHSClient {
           ApplicationReport.newInstance(applicationId4,
             ApplicationAttemptId.newInstance(applicationId4, 4), "user4",
             "queue4", "appname4", "host4", 127, null,
-            YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
+            YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, 4,
             FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f,
             "NON-MAPREDUCE", null);
       applicationReports.add(newApplicationReport4);

+ 4 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java

@@ -737,7 +737,7 @@ public class TestYarnClient extends ParameterizedSchedulerTestBase {
       ApplicationReport newApplicationReport = ApplicationReport.newInstance(
           applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
           "user", "queue", "appname", "host", 124, null,
-          YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+          YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
           FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
       List<ApplicationReport> applicationReports = new ArrayList<ApplicationReport>();
       applicationReports.add(newApplicationReport);
@@ -812,7 +812,7 @@ public class TestYarnClient extends ParameterizedSchedulerTestBase {
       ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
           applicationId2, ApplicationAttemptId.newInstance(applicationId2, 2),
           "user2", "queue2", "appname2", "host2", 125, null,
-          YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2,
+          YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2, 2,
           FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f, "NON-YARN", 
         null);
       applicationReports.add(newApplicationReport2);
@@ -821,7 +821,7 @@ public class TestYarnClient extends ParameterizedSchedulerTestBase {
       ApplicationReport newApplicationReport3 = ApplicationReport.newInstance(
           applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3),
           "user3", "queue3", "appname3", "host3", 126, null,
-          YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
+          YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3, 3,
           FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE",
         null);
       applicationReports.add(newApplicationReport3);
@@ -832,7 +832,7 @@ public class TestYarnClient extends ParameterizedSchedulerTestBase {
               applicationId4,
               ApplicationAttemptId.newInstance(applicationId4, 4),
               "user4", "queue4", "appname4", "host4", 127, null,
-              YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
+              YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, 4,
               FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f,
               "NON-MAPREDUCE", null);
       applicationReports.add(newApplicationReport4);

+ 21 - 21
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java

@@ -136,7 +136,7 @@ public class TestYarnCLI {
       ApplicationReport newApplicationReport = ApplicationReport.newInstance(
           applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
           "user", "queue", "appname", "host", 124, null,
-          YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+          YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
           FinalApplicationStatus.SUCCEEDED, usageReport, "N/A", 0.53789f, "YARN",
           null, null, false, Priority.newInstance(0), "high-mem", "high-mem");
       newApplicationReport.setLogAggregationStatus(LogAggregationStatus.SUCCEEDED);
@@ -383,7 +383,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null,
         Sets.newHashSet("tag1", "tag3"), false, Priority.UNDEFINED, "", "");
     List<ApplicationReport> applicationReports =
@@ -394,7 +394,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
         applicationId2, ApplicationAttemptId.newInstance(applicationId2, 2),
         "user2", "queue2", "appname2", "host2", 125, null,
-        YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2,
+        YarnApplicationState.FINISHED, "diagnostics2", "url2", 2, 2, 2,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.63789f, "NON-YARN", 
         null, Sets.newHashSet("tag2", "tag3"), false, Priority.UNDEFINED,
         "", "");
@@ -404,7 +404,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport3 = ApplicationReport.newInstance(
         applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3),
         "user3", "queue3", "appname3", "host3", 126, null,
-        YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3,
+        YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3, 3,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE", 
         null, Sets.newHashSet("tag1", "tag4"), false, Priority.UNDEFINED,
         "", "");
@@ -414,7 +414,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport4 = ApplicationReport.newInstance(
         applicationId4, ApplicationAttemptId.newInstance(applicationId4, 4),
         "user4", "queue4", "appname4", "host4", 127, null,
-        YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4,
+        YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, 4,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f,
         "NON-MAPREDUCE", null, Sets.newHashSet("tag1"), false,
         Priority.UNDEFINED, "", "");
@@ -424,7 +424,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport5 = ApplicationReport.newInstance(
         applicationId5, ApplicationAttemptId.newInstance(applicationId5, 5),
         "user5", "queue5", "appname5", "host5", 128, null,
-        YarnApplicationState.ACCEPTED, "diagnostics5", "url5", 5, 5,
+        YarnApplicationState.ACCEPTED, "diagnostics5", "url5", 5, 5, 5,
         FinalApplicationStatus.KILLED, null, "N/A", 0.93789f, "HIVE", null,
         Sets.newHashSet("tag2", "tag4"), false, Priority.UNDEFINED, "", "");
     applicationReports.add(newApplicationReport5);
@@ -433,7 +433,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport6 = ApplicationReport.newInstance(
         applicationId6, ApplicationAttemptId.newInstance(applicationId6, 6),
         "user6", "queue6", "appname6", "host6", 129, null,
-        YarnApplicationState.SUBMITTED, "diagnostics6", "url6", 6, 6,
+        YarnApplicationState.SUBMITTED, "diagnostics6", "url6", 6, 6, 6,
         FinalApplicationStatus.KILLED, null, "N/A", 0.99789f, "PIG",
         null, new HashSet<String>(), false, Priority.UNDEFINED, "", "");
     applicationReports.add(newApplicationReport6);
@@ -1007,7 +1007,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport2);
@@ -1020,7 +1020,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport);
@@ -1059,12 +1059,12 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport1 = ApplicationReport.newInstance(
         applicationId1, ApplicationAttemptId.newInstance(applicationId1, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
         applicationId2, ApplicationAttemptId.newInstance(applicationId2, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.34344f, "YARN", null);
     when(client.getApplicationReport(applicationId1)).thenReturn(
         newApplicationReport1);
@@ -1084,12 +1084,12 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport3 = ApplicationReport.newInstance(
         applicationId1, ApplicationAttemptId.newInstance(applicationId1, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     ApplicationReport newApplicationReport4 = ApplicationReport.newInstance(
         applicationId2, ApplicationAttemptId.newInstance(applicationId2, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53345f, "YARN", null);
     when(client.getApplicationReport(applicationId1)).thenReturn(
         newApplicationReport3);
@@ -1127,7 +1127,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport5 = ApplicationReport.newInstance(
         applicationId1, ApplicationAttemptId.newInstance(applicationId1, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53345f, "YARN", null);
     when(client.getApplicationReport(applicationId1)).thenReturn(
         newApplicationReport5);
@@ -1154,12 +1154,12 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport5 = ApplicationReport.newInstance(
         applicationId1, ApplicationAttemptId.newInstance(applicationId1, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     ApplicationReport newApplicationReport6 = ApplicationReport.newInstance(
         applicationId2, ApplicationAttemptId.newInstance(applicationId2, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53345f, "YARN", null);
     when(client.getApplicationReport(applicationId1)).thenReturn(
         newApplicationReport5);
@@ -1182,7 +1182,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport2);
@@ -1197,7 +1197,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(
         newApplicationReport);
@@ -1232,7 +1232,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport2 = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0,
+        YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class)))
         .thenReturn(newApplicationReport2);
@@ -1247,7 +1247,7 @@ public class TestYarnCLI {
     ApplicationReport newApplicationReport = ApplicationReport.newInstance(
         applicationId, ApplicationAttemptId.newInstance(applicationId, 1),
         "user", "queue", "appname", "host", 124, null,
-        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+        YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
         FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null);
     when(client.getApplicationReport(any(ApplicationId.class)))
         .thenReturn(newApplicationReport);
@@ -2015,7 +2015,7 @@ public class TestYarnCLI {
         ApplicationReport.newInstance(applicationId,
             ApplicationAttemptId.newInstance(applicationId, 1), "user",
             "queue", "appname", "host", 124, null,
-            YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0,
+            YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, 0,
             FinalApplicationStatus.UNDEFINED, null, "N/A", 0.53789f, "YARN",
             null);
     when(client.getApplicationReport(any(ApplicationId.class))).thenReturn(

+ 11 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java

@@ -220,6 +220,17 @@ public class ApplicationReportPBImpl extends ApplicationReport {
     return p.getStartTime();
   }
 
+  @Override
+  public long getLaunchTime() {
+    ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
+    return p.getLaunchTime();
+  }
+
+  @Override
+  public void setLaunchTime(long launchTime) {
+    maybeInitBuilder();
+    builder.setLaunchTime(launchTime);
+  }
   @Override
   public long getFinishTime() {
     ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;

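The new accessor pair follows the usual PBImpl discipline: reads consult the built proto when viaProto is set and the builder otherwise, while writes must first force a mutable builder. A generic sketch of the pattern for reference; the field name is illustrative:

```java
// Generic PBImpl accessor shape, mirroring getLaunchTime/setLaunchTime above.
@Override
public long getSomeField() {
  // Read path: immutable proto if viaProto, else the in-progress builder.
  ApplicationReportProtoOrBuilder p = viaProto ? proto : builder;
  return p.getSomeField();
}

@Override
public void setSomeField(long value) {
  maybeInitBuilder();  // copy proto into a mutable builder before writing
  builder.setSomeField(value);
}
```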
+ 11 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java

@@ -119,7 +119,8 @@ public final class DockerClientConfigHandler {
         credentials.addToken(
             new Text(registryUrl + "-" + applicationId), token);
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Added token: " + token.toString());
+          LOG.debug("Token read from Docker client configuration file: "
+              + token.toString());
         }
       }
     }
@@ -142,7 +143,7 @@ public final class DockerClientConfigHandler {
     tokens.rewind();
     if (LOG.isDebugEnabled()) {
       for (Token token : credentials.getAllTokens()) {
-        LOG.debug("Added token: " + token.toString());
+        LOG.debug("Token read from token storage: " + token.toString());
       }
     }
     return credentials;
@@ -161,9 +162,11 @@ public final class DockerClientConfigHandler {
     ObjectMapper mapper = new ObjectMapper();
     ObjectNode rootNode = mapper.createObjectNode();
     ObjectNode registryUrlNode = mapper.createObjectNode();
+    boolean foundDockerCred = false;
     if (credentials.numberOfTokens() > 0) {
       for (Token<? extends TokenIdentifier> tk : credentials.getAllTokens()) {
         if (tk.getKind().equals(DockerCredentialTokenIdentifier.KIND)) {
+          foundDockerCred = true;
           DockerCredentialTokenIdentifier ti =
               (DockerCredentialTokenIdentifier) tk.decodeIdentifier();
           ObjectNode registryCredNode = mapper.createObjectNode();
@@ -176,9 +179,11 @@ public final class DockerClientConfigHandler {
         }
       }
     }
-    rootNode.put(CONFIG_AUTHS_KEY, registryUrlNode);
-    String json =
-        mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rootNode);
-    FileUtils.writeStringToFile(outConfigFile, json, StandardCharsets.UTF_8);
+    if (foundDockerCred) {
+      rootNode.put(CONFIG_AUTHS_KEY, registryUrlNode);
+      String json =
+          mapper.writerWithDefaultPrettyPrinter().writeValueAsString(rootNode);
+      FileUtils.writeStringToFile(outConfigFile, json, StandardCharsets.UTF_8);
+    }
  }
}
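The foundDockerCred flag changes the method's contract: previously an empty {"auths":{}} config file was written even when no Docker credential tokens were present; now the write is skipped entirely in that case. A reduced sketch of the guarded-write shape, using plain JDK I/O and a placeholder token-kind string instead of the real Jackson and commons-io calls:

    import java.io.IOException;
    import java.io.Writer;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.List;

    // Only produce the config file when at least one credential of the
    // expected kind was found; otherwise leave no file behind.
    final class GuardedDockerConfigWrite {
      static void writeConfig(Path outConfigFile, List<String> tokenKinds)
          throws IOException {
        boolean foundDockerCred = false;
        for (String kind : tokenKinds) {
          if ("DOCKER_CLIENT_CREDENTIAL_TOKEN".equals(kind)) { // stand-in kind
            foundDockerCred = true; // remember something is worth writing
          }
        }
        if (foundDockerCred) { // the new guard: no tokens => no file at all
          try (Writer w =
              Files.newBufferedWriter(outConfigFile, StandardCharsets.UTF_8)) {
            w.write("{ \"auths\": { } }"); // placeholder for the real JSON tree
          }
        }
      }
    }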

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestApplicatonReport.java

@@ -58,7 +58,7 @@ public class TestApplicatonReport {
    ApplicationReport appReport =
        ApplicationReport.newInstance(appId, appAttemptId, "user", "queue",
          "appname", "host", 124, null, YarnApplicationState.FINISHED,
-          "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null,
+          "diagnostics", "url", 0, 0, 0, FinalApplicationStatus.SUCCEEDED, null,
           "N/A", 0.53789f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null,
           "N/A", 0.53789f, YarnConfiguration.DEFAULT_APPLICATION_TYPE, null,
           null, false, Priority.newInstance(0),"","");
           null, false, Priority.newInstance(0),"","");
     return appReport;
     return appReport;

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java

@@ -137,7 +137,7 @@ public class ApplicationHistoryManagerImpl extends AbstractService implements
      currentApplicationAttemptId, appHistory.getUser(), appHistory.getQueue(),
      appHistory.getApplicationName(), host, rpcPort, null,
      appHistory.getYarnApplicationState(), appHistory.getDiagnosticsInfo(),
-      trackingUrl, appHistory.getStartTime(), appHistory.getFinishTime(),
+      trackingUrl, appHistory.getStartTime(), 0, appHistory.getFinishTime(),
      appHistory.getFinalApplicationStatus(), null, "", 100,
      appHistory.getApplicationType(), null);
  }

+ 2 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java

@@ -391,7 +391,7 @@ public class BuilderUtils {
      ApplicationId applicationId, ApplicationAttemptId applicationAttemptId,
      String user, String queue, String name, String host, int rpcPort,
      Token clientToAMToken, YarnApplicationState state, String diagnostics,
-      String url, long startTime, long finishTime,
+      String url, long startTime, long launchTime, long finishTime,
      FinalApplicationStatus finalStatus,
      ApplicationResourceUsageReport appResources, String origTrackingUrl,
      float progress, String appType, Token amRmToken, Set<String> tags,
@@ -410,6 +410,7 @@ public class BuilderUtils {
    report.setDiagnostics(diagnostics);
    report.setTrackingUrl(url);
    report.setStartTime(startTime);
+    report.setLaunchTime(launchTime);
    report.setFinishTime(finishTime);
    report.setFinalApplicationStatus(finalStatus);
    report.setApplicationResourceUsageReport(appResources);
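Inserting launchTime between two existing long parameters is the riskiest part of this signature change: the compiler cannot distinguish adjacent longs, which is why every call site has to be updated in the same patch (TestApplicatonReport and ApplicationHistoryManagerImpl above both pass 0 where no launch time is recorded). A self-contained illustration of the hazard, with illustrative names:

    // Three adjacent long parameters are interchangeable to the compiler, so
    // a stale call site written for the old (startTime, finishTime) order
    // still compiles but silently swaps values.
    final class ReportTimesSketch {
      static String describe(long startTime, long launchTime, long finishTime) {
        return "start=" + startTime + ", launch=" + launchTime
            + ", finish=" + finishTime;
      }

      public static void main(String[] args) {
        // Updated call site: launch time unknown to the history store, pass 0.
        System.out.println(describe(1000L, 0L, 5000L));
      }
    }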

+ 3 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java

@@ -242,10 +242,9 @@ public class AppBlock extends HtmlBlock {
         .__("FinalStatus Reported by AM:",
         .__("FinalStatus Reported by AM:",
             clairfyAppFinalStatus(app.getFinalAppStatus()))
             clairfyAppFinalStatus(app.getFinalAppStatus()))
         .__("Started:", Times.format(app.getStartedTime()))
         .__("Started:", Times.format(app.getStartedTime()))
-        .__(
-            "Elapsed:",
-            StringUtils.formatTime(Times.elapsed(app.getStartedTime(),
-                app.getFinishedTime())))
+        .__("Launched:", Times.format(app.getLaunchTime()))
+        .__("Finished:", Times.format(app.getFinishedTime()))
+        .__("Elapsed:", StringUtils.formatTime(app.getElapsedTime()))
        .__(
            "Tracking URL:",
            app.getTrackingUrl() == null
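The block above now renders separate Launched and Finished rows and delegates the Elapsed value to app.getElapsedTime() instead of recomputing Times.elapsed inline. For reference, a sketch of the elapsed-time convention involved; this mirrors the usual Times.elapsed behavior rather than code shown in this diff:

    // A finish time of 0 conventionally means "still running", so elapsed
    // time is measured up to now; a start time of 0 means "not started".
    final class ElapsedTimeSketch {
      static long elapsed(long started, long finished) {
        if (started <= 0) {
          return -1; // not started yet
        }
        if (finished <= 0) {
          return System.currentTimeMillis() - started; // still running
        }
        return finished - started; // completed
      }

      public static void main(String[] args) {
        System.out.println(elapsed(1_000L, 6_000L)); // prints 5000
      }
    }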

+ 4 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java

@@ -150,7 +150,9 @@ public class AppsBlock extends HtmlBlock {
         html.table("#apps").thead().tr().th(".id", "ID").th(".user", "User")
         html.table("#apps").thead().tr().th(".id", "ID").th(".user", "User")
           .th(".name", "Name").th(".type", "Application Type")
           .th(".name", "Name").th(".type", "Application Type")
           .th(".queue", "Queue").th(".priority", "Application Priority")
           .th(".queue", "Queue").th(".priority", "Application Priority")
-          .th(".starttime", "StartTime").th(".finishtime", "FinishTime")
+          .th(".starttime", "StartTime")
+          .th(".launchtime", "LaunchTime")
+          .th(".finishtime", "FinishTime")
           .th(".state", "State").th(".finalstatus", "FinalStatus")
           .th(".state", "State").th(".finalstatus", "FinalStatus")
           .th(".progress", "Progress").th(".ui", "Tracking UI").__().__().tbody();
           .th(".progress", "Progress").th(".ui", "Tracking UI").__().__().tbody();
 
 
@@ -188,6 +190,7 @@ public class AppsBlock extends HtmlBlock {
            .getQueue()))).append("\",\"").append(String
                .valueOf(app.getPriority()))
        .append("\",\"").append(app.getStartedTime())
+        .append("\",\"").append(app.getLaunchTime())
         .append("\",\"").append(app.getFinishedTime())
         .append("\",\"").append(app.getFinishedTime())
         .append("\",\"")
         .append("\",\"")
         .append(app.getAppState() == null ? UNAVAILABLE : app.getAppState())
         .append(app.getAppState() == null ? UNAVAILABLE : app.getAppState())

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/WebPageUtils.java

@@ -51,7 +51,7 @@ public class WebPageUtils {
     sb.append("[\n")
     sb.append("[\n")
       .append("{'sType':'natural', 'aTargets': [0]")
       .append("{'sType':'natural', 'aTargets': [0]")
       .append(", 'mRender': parseHadoopID }")
       .append(", 'mRender': parseHadoopID }")
-      .append("\n, {'sType':'numeric', 'aTargets': [6, 7]")
+      .append("\n, {'sType':'numeric', 'aTargets': [6, 7, 8]")
       .append(", 'mRender': renderHadoopDate }")
       .append(", 'mRender': renderHadoopDate }")
       .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
       .append("\n, {'sType':'numeric', bSearchable:false, 'aTargets':");
     if (isFairSchedulerPage) {
     if (isFairSchedulerPage) {
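This small change is coupled to the AppsBlock hunk above: the table header gained a LaunchTime column and each data row gained a matching field, so the set of columns that DataTables should render as dates grew from zero-based indexes [6, 7] to [6, 7, 8]. A self-contained sketch that prints the generated column-definition fragment to make the index mapping visible:

    // DataTables applies renderers by zero-based column index; inserting
    // LaunchTime between StartTime and FinishTime shifts FinishTime to 8.
    public final class AppsTableColumnSketch {
      public static void main(String[] args) {
        StringBuilder sb = new StringBuilder();
        sb.append("[\n")
          .append("{'sType':'natural', 'aTargets': [0]")
          .append(", 'mRender': parseHadoopID }")
          // StartTime=6, LaunchTime=7, FinishTime=8 after the new column
          .append("\n, {'sType':'numeric', 'aTargets': [6, 7, 8]")
          .append(", 'mRender': renderHadoopDate }")
          .append("\n]");
        System.out.println(sb);
      }
    }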

+ 6 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java

@@ -55,6 +55,7 @@ public class AppInfo {
  protected FinalApplicationStatus finalAppStatus;
  protected long submittedTime;
  protected long startedTime;
+  private long launchTime;
  protected long finishedTime;
  protected long elapsedTime;
  protected String applicationTags;
@@ -88,6 +89,7 @@ public class AppInfo {
    originalTrackingUrl = app.getOriginalTrackingUrl();
    submittedTime = app.getStartTime();
    startedTime = app.getStartTime();
+    launchTime = app.getLaunchTime();
    finishedTime = app.getFinishTime();
    elapsedTime = Times.elapsed(startedTime, finishedTime);
    finalAppStatus = app.getFinalApplicationStatus();
@@ -198,6 +200,10 @@ public class AppInfo {
    return submittedTime;
  }
 
+  public long getLaunchTime() {
+    return launchTime;
+  }
+
  public long getStartedTime() {
    return startedTime;
  }

+ 0 - 30
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DirectoryCollection.java

@@ -36,8 +36,6 @@ import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileContext;
@@ -490,9 +488,6 @@ public class DirectoryCollection {
            new DiskErrorInformation(DiskErrorCause.DISK_FULL, msg));
          continue;
        }
-
-        // create a random dir to make sure fs isn't in read-only mode
-        verifyDirUsingMkdir(testDir);
      } catch (IOException ie) {
        ret.put(dir,
          new DiskErrorInformation(DiskErrorCause.OTHER, ie.getMessage()));
@@ -501,31 +496,6 @@ public class DirectoryCollection {
    return ret;
  }
 
-  /**
-   * Function to test whether a dir is working correctly by actually creating a
-   * random directory.
-   *
-   * @param dir
-   *          the dir to test
-   */
-  private void verifyDirUsingMkdir(File dir) throws IOException {
-
-    String randomDirName = RandomStringUtils.randomAlphanumeric(5);
-    File target = new File(dir, randomDirName);
-    int i = 0;
-    while (target.exists()) {
-
-      randomDirName = RandomStringUtils.randomAlphanumeric(5) + i;
-      target = new File(dir, randomDirName);
-      i++;
-    }
-    try {
-      diskValidator.checkStatus(target);
-    } finally {
-      FileUtils.deleteQuietly(target);
-    }
-  }
-
  private boolean isDiskUsageOverPercentageLimit(File dir,
      float diskUtilizationPercentageCutoff) {
    float freePercentage =

+ 0 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java


+ 0 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java


+ 0 - 19
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.ProxyUsers;
@@ -254,9 +253,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
    // load core-site.xml
    loadConfigurationXml(YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
 
-    // Refresh user to group mappings during init.
-    refreshUserToGroupMappingsWithConf();
-
    // Do refreshSuperUserGroupsConfiguration with loaded core-site.xml
    // Or use RM specific configurations to overwrite the common ones first
    // if they exist
@@ -340,21 +336,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
    super.serviceInit(this.conf);
  }
 
-  private void refreshUserToGroupMappingsWithConf()
-      throws YarnException, IOException {
-    Configuration newConf = new Configuration(false);
-    InputStream confFileInputStream =
-        configurationProvider
-        .getConfigurationInputStream(newConf, YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);
-    if (confFileInputStream != null) {
-      newConf.addResource(confFileInputStream);
-    }
-
-    // Do refreshUserToGroupsMappings with loaded core-site.xml
-    Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(newConf)
-        .refresh();
-  }
-
  private void loadConfigurationXml(String configurationFile)
      throws YarnException, IOException {
    InputStream configurationInputStream =

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java

@@ -303,7 +303,7 @@ public class AMLauncher implements Runnable {
         LOG.info("Launching master" + application.getAppAttemptId());
         LOG.info("Launching master" + application.getAppAttemptId());
         launch();
         launch();
         handler.handle(new RMAppAttemptEvent(application.getAppAttemptId(),
         handler.handle(new RMAppAttemptEvent(application.getAppAttemptId(),
-            RMAppAttemptEventType.LAUNCHED));
+            RMAppAttemptEventType.LAUNCHED, System.currentTimeMillis()));
      } catch(Exception ie) {
        String message = "Error launching " + application.getAppAttemptId()
            + ". Got exception: " + StringUtils.stringifyException(ie);

Some files were not shown because too many files changed in this diff