
YARN-321. Forwarding YARN-321 branch to latest trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/YARN-321@1560467 13f79535-47bb-0310-9956-ffa450edef68
Vinod Kumar Vavilapalli 11 years ago
parent
commit
c0f37f96d7
15 changed files with 473 additions and 287 deletions
  1. +6 -3     hadoop-common-project/hadoop-common/CHANGES.txt
  2. +1 -1     hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  3. +11 -0    hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
  4. +6 -0     hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  5. +1 -1     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  6. +2 -2     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
  7. +200 -217 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
  8. +113 -55  hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
  9. +20 -0    hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css
  10. +3 -0    hadoop-mapreduce-project/CHANGES.txt
  11. +5 -5    hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
  12. +10 -0   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
  13. +87 -1   hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java
  14. +3 -1    hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
  15. +5 -1    hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerLogAppender.java

+ 6 - 3
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -113,9 +113,6 @@ Trunk (Unreleased)
 
     HADOOP-10177. Create CLI tools for managing keys. (Larry McCay via omalley)
 
-    HADOOP-10143 replace WritableFactories's hashmap with ConcurrentHashMap
-    (Liang Xie via stack)
-
   BUG FIXES
 
     HADOOP-9451. Fault single-layer config if node group topology is enabled.
@@ -424,6 +421,9 @@ Release 2.4.0 - UNRELEASED
     HADOOP-9420. Add percentile or max metric for rpcQueueTime, processing time.
     (Liang Xie via wang)
 
+    HADOOP-10143 replace WritableFactories's hashmap with ConcurrentHashMap
+    (Liang Xie via stack)
+
   OPTIMIZATIONS
 
     HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
@@ -533,6 +533,9 @@ Release 2.4.0 - UNRELEASED
 
     HADOOP-10235. Hadoop tarball has 2 versions of stax-api JARs. (tucu)
 
+    HADOOP-10252. HttpServer can't start if hostname is not specified. (Jimmy
+    Xiang via atm)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -455,7 +455,7 @@ public class HttpServer implements FilterContainer {
   public HttpServer(String name, String bindAddress, int port,
       boolean findPort, Configuration conf, AccessControlList adminsAcl, 
       Connector connector, String[] pathSpecs) throws IOException {
-    this(new Builder().setName(name)
+    this(new Builder().setName(name).hostName(bindAddress)
         .addEndpoint(URI.create("http://" + bindAddress + ":" + port))
         .setFindPort(findPort).setConf(conf).setACL(adminsAcl)
         .setConnector(connector).setPathSpec(pathSpecs));
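
HADOOP-10252 note: the deprecated constructor built the Builder without ever calling hostName(), so HttpServer could fail to start when callers supplied only a bind address; the one-line change above forwards bindAddress as the host name. For reference, a minimal sketch (assumed, not part of this patch; the name, address, and port are illustrative) of constructing the server through the Builder directly, which is the path this constructor delegates to:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer;

    public class HttpServerStartSketch {
      public static void main(String[] args) throws Exception {
        HttpServer server = new HttpServer.Builder()
            .setName("test")
            .hostName("0.0.0.0")                         // the call the old constructor omitted
            .addEndpoint(URI.create("http://0.0.0.0:0"))
            .setFindPort(true)
            .setConf(new Configuration())
            .build();
        try {
          server.start();
        } finally {
          server.stop();
        }
      }
    }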

+ 11 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

@@ -524,6 +524,17 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response));
   }
 
+  @Test
+  @SuppressWarnings("deprecation")
+  public void testOldConstructor() throws Exception {
+    HttpServer server = new HttpServer("test", "0.0.0.0", 0, false);
+    try {
+      server.start();
+    } finally {
+      server.stop();
+    }
+  }
+
   @Test public void testBindAddress() throws Exception {
     checkBindAddress("localhost", 0, false).stop();
     // hang onto this one for a bit more testing

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -695,6 +695,9 @@ Release 2.4.0 - UNRELEASED
 
     HDFS-5704. Change OP_UPDATE_BLOCKS with a new OP_ADD_BLOCK. (jing9)
 
+    HDFS-5434. Change block placement policy constructors from package private
+    to protected. (Buddy Taylor via Arpit Agarwal)
+
   OPTIMIZATIONS
 
     HDFS-5239.  Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -778,6 +781,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-5800. Fix a typo in DFSClient.renewLease().  (Kousuke Saruta
     via szetszwo)
 
+    HDFS-5748. Too much information shown in the dfs health page.
+    (Haohui Mai via brandonli)
+
   BREAKDOWN OF HDFS-2832 SUBTASKS AND RELATED JIRAS
 
     HDFS-4985. Add storage type to the protocol and expose it in block report

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -79,7 +79,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    */
   protected int tolerateHeartbeatMultiplier;
 
-  BlockPlacementPolicyDefault(Configuration conf,  FSClusterStats stats,
+  protected BlockPlacementPolicyDefault(Configuration conf, FSClusterStats stats,
                            NetworkTopology clusterMap) {
     initialize(conf, stats, clusterMap);
   }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java

@@ -46,12 +46,12 @@ import org.apache.hadoop.net.NodeBase;
  */
 public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefault {
 
-  BlockPlacementPolicyWithNodeGroup(Configuration conf,  FSClusterStats stats,
+  protected BlockPlacementPolicyWithNodeGroup(Configuration conf,  FSClusterStats stats,
       NetworkTopology clusterMap) {
     initialize(conf, stats, clusterMap);
   }
 
-  BlockPlacementPolicyWithNodeGroup() {
+  protected BlockPlacementPolicyWithNodeGroup() {
   }
 
   @Override
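
HDFS-5434 widens these constructors from package private to protected so that block placement policies can be subclassed from outside org.apache.hadoop.hdfs.server.blockmanagement. A hypothetical subclass that the change makes possible (the package and class name are illustrative, not part of the patch):

    package com.example.placement;  // illustrative user package

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
    import org.apache.hadoop.hdfs.server.blockmanagement.FSClusterStats;
    import org.apache.hadoop.net.NetworkTopology;

    public class CustomPlacementPolicy extends BlockPlacementPolicyDefault {
      // Compiles only because the superclass constructor is now protected
      // rather than package private.
      protected CustomPlacementPolicy(Configuration conf, FSClusterStats stats,
          NetworkTopology clusterMap) {
        super(conf, stats, clusterMap);
      }
    }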

+ 200 - 217
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html

@@ -23,25 +23,53 @@
 <title>Namenode information</title>
 </head>
 <body>
+
+<header class="navbar navbar-inverse bs-docs-nav" role="banner">
+<div class="container">
+  <div class="navbar-header">
+    <a href="http://hadoop.apache.org/core" class="navbar-brand">Hadoop</a>
+  </div>
+
+  <ul class="nav navbar-nav" id="ui-tabs">
+    <li><a href="#tab-overview">Overview</a></li>
+    <li><a href="#tab-datanode">Datanodes</a></li>
+    <li><a href="#tab-snapshot">Snapshot</a></li>
+    <li><a href="#tab-startup-progress">Startup Progress</a></li>
+    <li class="dropdown">
+      <a href="#" class="dropdown-toggle" data-toggle="dropdown">Utilities <b class="caret"></b></a>
+      <ul class="dropdown-menu">
+        <li><a href="explorer.html">Browse the file system</a></li>
+        <li><a href="logs">Logs</a></li>
+      </ul>
+    </li>
+  </ul>
+</div>
+</header>
+
 <div class="container">
-<div class="alert alert-danger" id="alert-panel" style="display:none">
-<button type="button" class="close" onclick="$('#alert-panel').hide();">&times;</button>
-<div class="alert-body" id="alert-panel-body"></div>
+
+<div id="alert-panel">
+  <div class="alert alert-danger">
+    <button type="button" class="close" onclick="$('#alert-panel').hide();">&times;</button>
+    <div class="alert-body" id="alert-panel-body"></div>
+  </div>
 </div>
-<div id="panel"></div>
+
+<div class="tab-content">
+  <div class="tab-pane" id="tab-overview"></div>
+  <div class="tab-pane" id="tab-datanode"></div>
+  <div class="tab-pane" id="tab-snapshot"></div>
+  <div class="tab-pane" id="tab-startup-progress"></div>
 </div>
+
 <div class="row">
-<hr />
-<div class="col-xs-2"><p><a href="http://hadoop.apache.org/core">Hadoop</a>, 2013.</p></div>
-<div class="col-xs-1 pull-right"><a style="color: #ddd" href="dfshealth.jsp">Legacy UI</a></div>
+  <hr />
+  <div class="col-xs-2"><p><a href="http://hadoop.apache.org/core">Hadoop</a>, 2014.</p></div>
+  <div class="col-xs-1 pull-right"><a style="color: #ddd" href="dfshealth.jsp">Legacy UI</a></div>
+</div>
 </div>
 
 <script type="text/x-dust-template" id="tmpl-dfshealth">
-<div class="page-header">
-  {#nnstat}
-  <h1>NameNode '{HostAndPort}' ({State})</h1>
-  {/nnstat}
-</div>
 
 {#nn}
 {@if cond="{DistinctVersionCount} > 1"}
@@ -71,238 +99,193 @@
 {/if}
 {/nn}
 
-<div class="panel panel-success">
-  <div class="panel-heading">Overview</div>
-  <div class="panel-body">
-    {#nn}
-    <table class="table table-bordered">
-      <tr><th>Started:</th><td>{NNStarted}</td></tr>
-      <tr><th>Version:</th><td>{Version}</td></tr>
-      <tr><th>Compiled:</th><td>{CompileInfo}</td></tr>
-      <tr><th>Cluster ID:</th><td>{ClusterId}</td></tr>
-      <tr><th>Block Pool ID:</th><td>{BlockPoolId}</td></tr>
-    </table>
-    {/nn}
-  </div>
-</div>
-
-<p><a href="explorer.html">Browse the filesystem</a></p>
-<p><a href="/logs/">NameNode Logs</a></p>
-
-<hr/>
-
-<div class="panel panel-success">
-  <div class="panel-heading">Cluster Summary</div>
-  <div class="panel-body">
+<div class="page-header"><h1>Overview {#nnstat}<small>'{HostAndPort}' ({State})</small>{/nnstat}</h1></div>
+{#nn}
+<table class="table table-bordered table-striped">
+  <tr><th>Started:</th><td>{NNStarted}</td></tr>
+  <tr><th>Version:</th><td>{Version}</td></tr>
+  <tr><th>Compiled:</th><td>{CompileInfo}</td></tr>
+  <tr><th>Cluster ID:</th><td>{ClusterId}</td></tr>
+  <tr><th>Block Pool ID:</th><td>{BlockPoolId}</td></tr>
+</table>
+{/nn}
 
-    <p>
-      Security is {#nnstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/nnstat}.</p>
-    <p>{#nn}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/nn}</p>
+<div class="page-header"><h1>Summary</h1></div>
+<p>
+  Security is {#nnstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/nnstat}.</p>
+<p>{#nn}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/nn}</p>
 
-    <p>
-      {#fs}
-      {FilesTotal} files and directories, {BlocksTotal} blocks = {@math key="{FilesTotal}" method="add" operand="{BlocksTotal}"/} total filesystem object(s).
-      {#helper_fs_max_objects/}
-      {/fs}
-    </p>
-    {#mem.HeapMemoryUsage}
-    <p>Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Heap Memory. Max Heap Memory is {max|fmt_bytes}. </p>
-    {/mem.HeapMemoryUsage}
+<p>
+  {#fs}
+  {FilesTotal} files and directories, {BlocksTotal} blocks = {@math key="{FilesTotal}" method="add" operand="{BlocksTotal}"/} total filesystem object(s).
+  {#helper_fs_max_objects/}
+  {/fs}
+</p>
+{#mem.HeapMemoryUsage}
+<p>Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Heap Memory. Max Heap Memory is {max|fmt_bytes}. </p>
+{/mem.HeapMemoryUsage}
 
-    {#mem.NonHeapMemoryUsage}
-    <p>Non Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Commited Non Heap Memory. Max Non Heap Memory is {max|fmt_bytes}. </p>
-    {/mem.NonHeapMemoryUsage}
+{#mem.NonHeapMemoryUsage}
+<p>Non Heap Memory used {used|fmt_bytes} of {committed|fmt_bytes} Commited Non Heap Memory. Max Non Heap Memory is {max|fmt_bytes}. </p>
+{/mem.NonHeapMemoryUsage}
 
-    {#nn}
-    <table class="table table-bordered table-striped">
-      <tr><th> Configured Capacity:</th><td>{Total|fmt_bytes}</td></tr>
-      <tr><th> DFS Used:</th><td>{Used|fmt_bytes}</td></tr>
-      <tr><th> Non DFS Used:</th><td>{NonDfsUsedSpace|fmt_bytes}</td></tr>
-      <tr><th> DFS Remaining:</th><td>{Free|fmt_bytes}</td></tr>
-      <tr><th> DFS Used%:</th><td>{PercentUsed|fmt_percentage}</td></tr>
-      <tr><th> DFS Remaining%:</th><td>{PercentRemaining|fmt_percentage}</td></tr>
-      <tr><th> Block Pool Used:</th><td>{BlockPoolUsedSpace|fmt_bytes}</td></tr>
-      <tr><th> Block Pool Used%:</th><td>{PercentBlockPoolUsed|fmt_percentage}</td></tr>
-      <tr><th> DataNodes usages% (Min/Median/Max/stdDev): </th>
+{#nn}
+<table class="table table-bordered table-striped">
+  <tr><th> Configured Capacity:</th><td>{Total|fmt_bytes}</td></tr>
+  <tr><th> DFS Used:</th><td>{Used|fmt_bytes}</td></tr>
+  <tr><th> Non DFS Used:</th><td>{NonDfsUsedSpace|fmt_bytes}</td></tr>
+  <tr><th> DFS Remaining:</th><td>{Free|fmt_bytes}</td></tr>
+  <tr><th> DFS Used%:</th><td>{PercentUsed|fmt_percentage}</td></tr>
+  <tr><th> DFS Remaining%:</th><td>{PercentRemaining|fmt_percentage}</td></tr>
+  <tr><th> Block Pool Used:</th><td>{BlockPoolUsedSpace|fmt_bytes}</td></tr>
+  <tr><th> Block Pool Used%:</th><td>{PercentBlockPoolUsed|fmt_percentage}</td></tr>
+  <tr><th> DataNodes usages% (Min/Median/Max/stdDev): </th>
 	<td>{#NodeUsage.nodeUsage}{min} / {median} / {max} / {stdDev}{/NodeUsage.nodeUsage}</td></tr>
-      {/nn}
+{/nn}
 
-      {#fs}
-      <tr><th><a href="#nodelist-operation">Live Nodes</a></th><td>{NumLiveDataNodes} (Decommissioned: {NumDecomLiveDataNodes})</td></tr>
-      <tr><th><a href="#nodelist-operation">Dead Nodes</a></th><td>{NumDeadDataNodes} (Decommissioned: {NumDecomDeadDataNodes})</td></tr>
-      <tr><th><a href="#nodelist-decom">Decommissioning Nodes</a></th><td>{NumDecommissioningDataNodes}</td></tr>
-      <tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{UnderReplicatedBlocks}</td></tr>
-      {/fs}
-    </table>
-  </div>
-</div>
+{#fs}
+  <tr><th><a href="#tab-datanode">Live Nodes</a></th><td>{NumLiveDataNodes} (Decommissioned: {NumDecomLiveDataNodes})</td></tr>
+  <tr><th><a href="#tab-datanode">Dead Nodes</a></th><td>{NumDeadDataNodes} (Decommissioned: {NumDecomDeadDataNodes})</td></tr>
+  <tr><th><a href="#tab-datanode">Decommissioning Nodes</a></th><td>{NumDecommissioningDataNodes}</td></tr>
+  <tr><th title="Excludes missing blocks.">Number of Under-Replicated Blocks</th><td>{UnderReplicatedBlocks}</td></tr>
+{/fs}
+</table>
 
-<hr/>
-<div class="panel panel-success">
-  <div class="panel-heading">NameNode Journal Status</div>
-  <div class="panel-body">
-    <p><b>Current transaction ID:</b> {nn.JournalTransactionInfo.LastAppliedOrWrittenTxId}</p>
-    <table class="table" title="NameNode Journals">
-      <thead>
+<div class="page-header"><h1>Namenode Journal Status</h1></div>
+<p><b>Current transaction ID:</b> {nn.JournalTransactionInfo.LastAppliedOrWrittenTxId}</p>
+<table class="table" title="NameNode Journals">
+  <thead>
 	<tr><th>Journal Manager</th><th>State</th></tr>
-      </thead>
-      <tbody>
+  </thead>
+  <tbody>
 	{#nn.NameJournalStatus}
 	<tr><td>{manager}</td><td>{stream}</td></tr>
 	{/nn.NameJournalStatus}
-      </tbody>
-    </table>
-  </div>
-</div>
+  </tbody>
+</table>
 
-<hr/>
-<div class="panel panel-success">
-  <div class="panel-heading">NameNode Storage</div>
-  <div class="panel-body">
-    <table class="table" title="NameNode Storage">
-      <thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>
-      {#nn.NameDirStatuses}
-      {#active}{#helper_dir_status type="Active"/}{/active}
-      {#failed}{#helper_dir_status type="Failed"/}{/failed}
-      {/nn.NameDirStatuses}
-    </table>
-  </div>
-</div>
-<hr/>
+<div class="page-header"><h1>NameNode Storage</h1></div>
+<table class="table" title="NameNode Storage">
+  <thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>
+  {#nn.NameDirStatuses}
+  {#active}{#helper_dir_status type="Active"/}{/active}
+  {#failed}{#helper_dir_status type="Failed"/}{/failed}
+  {/nn.NameDirStatuses}
+</table>
+</script>
 
-<div class="panel panel-success">
-  <div class="panel-heading">Snapshot Summary</div>
-  <div class="panel-body">
-    {#fs.SnapshotStats}
-    <table class="table" title="Snapshot Summary">
-      <thead><tr><td><b>Snapshottable directories</b></td>
-	  <td><b>Snapshotted directories</b></td></tr>
-      </thead>
-      <tbody>
-	<tr>
-	  <td>{SnapshottableDirectories}</td>
-	  <td>{Snapshots}</td>
-	</tr>
-      </tbody>
-    </table>
-    {/fs.SnapshotStats}
-  </div>
-</div>
-<hr/>
+<script type="text/x-dust-template" id="tmpl-snapshot">
+<div class="page-header"><h1>Snapshot Summary</h1></div>
+<p><b>Snapshottable directories</b>: {SnapshottableDirectories}</p>
+<p><b>Snapshotted directories</b>: {Snapshots}</p>
+</script>
+
+<script type="text/x-dust-template" id="tmpl-datanode">
+<div class="page-header"><h1>Datanode Information</h1></div>
+<div class="page-header"><h1><small>In operation</small></h1></div>
+<small>
+<table class="table">
+  <thead>
+    <tr>
+      <th>Node</th>
+      <th>Last contact</th>
+      <th>Admin State</th>
+      <th>Capacity</th>
+      <th>Used</th>
+      <th>Non DFS Used</th>
+      <th>Remaining</th>
+      <th>Blocks</th>
+      <th>Block pool used</th>
+      <th>Failed Volumes</th>
+      <th>Version</th>
+    </tr>
+  </thead>
+  {#LiveNodes}
+  <tr>
+    <td>{name} ({xferaddr})</td>
+    <td>{lastContact}</td>
+    <td>{adminState}</td>
+    <td>{capacity|fmt_bytes}</td>
+    <td>{used|fmt_bytes}</td>
+    <td>{nonDfsUsedSpace|fmt_bytes}</td>
+    <td>{remaining|fmt_bytes}</td>
+    <td>{numBlocks}</td>
+    <td>{blockPoolUsed|fmt_bytes} ({blockPoolUsedPercent|fmt_percentage})</td>
+    <td>{volfails}</td>
+    <td>{version}</td>
+  </tr>
+  {/LiveNodes}
+  {#DeadNodes}
+  <tr class="danger">
+    <td>{name} ({xferaddr})</td>
+    <td>{lastContact}</td>
+    <td>Dead{?decomissioned}, Decomissioned{/decomissioned}</td>
+    <td>-</td>
+    <td>-</td>
+    <td>-</td>
+    <td>-</td>
+    <td>-</td>
+    <td>-</td>
+    <td>-</td>
+    <td>-</td>
+  </tr>
+  {/DeadNodes}
+</table>
+</small>
+
+<div class="page-header"><h1><small>Decomissioning</small></h1></div>
+<small>
+<table class="table">
+  <thead>
+    <tr>
+      <th>Node</th>
+      <th>Last contact</th>
+      <th>Under replicated blocks</th>
+      <th>Blocks with no live replicas</th>
+      <th>Under Replicated Blocks <br/>In files under construction</th>
+    </tr>
+  </thead>
+  {#DecomNodes}
+  <tr>
+    <td>{name} ({xferaddr})</td>
+    <td>{lastContact}</td>
+    <td>{underReplicatedBlocks}</td>
+    <td>{decommissionOnlyReplicas}</td>
+    <td>{underReplicateInOpenFiles}</td>
+  </tr>
+  {/DecomNodes}
+</table>
+</small>
+</script>
 
-{#startup}
-<div class="panel panel-success">
-  <div class="panel-heading">Startup Progress</div>
-  <div class="panel-body">
-    <p>Elapsed Time: {elapsedTime|fmt_time}, Percent Complete: {percentComplete|fmt_percentage}</p>
-    <table class="table">
-      <thead>
-	<tr>
+<script type="text/x-dust-template" id="tmpl-startup-progress">
+<div class="page-header"><h1>Startup Progress</h1></div>
+<p>Elapsed Time: {elapsedTime|fmt_time}, Percent Complete: {percentComplete|fmt_percentage}</p>
+<table class="table">
+  <thead>
+	<tr class="active">
 	  <th>Phase</th>
-	  <th>Completion</th>
-	  <th>Elapsed Time</th>
+	  <th style="text-align:center">Completion</th>
+	  <th style="text-align:center">Elapsed Time</th>
 	</tr>
-      </thead>
-      <tbody>
+  </thead>
+  <tbody>
 	{#phases}
 	<tr class="phase">
 	  <td class="startupdesc">{desc} {file} {size|fmt_bytes}</td>
-	  <td>{percentComplete|fmt_percentage}</td>
-	  <td>{elapsedTime|fmt_time}</td>
+	  <td style="text-align:center">{percentComplete|fmt_percentage}</td>
+	  <td style="text-align:center">{elapsedTime|fmt_time}</td>
 	</tr>
 	{#steps root_file=file}
 	<tr class="step">
 	  <td class="startupdesc">{stepDesc} {stepFile} {stepSize|fmt_bytes} ({count}/{total})</td>
-	  <td>{percentComplete|fmt_percentage}</td>
+	  <td style="text-align:center">{percentComplete|fmt_percentage}</td>
 	  <td></td>
 	</tr>
 	{/steps}
 	{/phases}
-    </table>
-  </div>
-</div>
-{/startup}
-
-<hr/>
-<div class="panel panel-success">
-  <div class="panel-heading">Datanode Information</div>
-  <div class="panel-body">
-    <div class="panel panel-default" id="nodelist-operation">
-      <div class="panel-heading">Nodes in operation</div>
-      <div class="panel-body">
-        <table class="table">
-          <thead>
-            <tr>
-              <th>Node</th>
-              <th>Last contact</th>
-              <th>Admin State</th>
-              <th>Capacity</th>
-              <th>Used</th>
-              <th>Non DFS Used</th>
-              <th>Remaining</th>
-              <th>Blocks</th>
-              <th>Block pool used</th>
-              <th>Failed Volumes</th>
-            </tr>
-          </thead>
-          {#nn.LiveNodes}
-          <tr>
-            <td>{name} ({xferaddr})</td>
-            <td>{lastContact}</td>
-            <td>{adminState}</td>
-            <td>{capacity|fmt_bytes}</td>
-            <td>{used|fmt_bytes}</td>
-            <td>{nonDfsUsedSpace|fmt_bytes}</td>
-            <td>{remaining|fmt_bytes}</td>
-            <td>{numBlocks}</td>
-            <td>{blockPoolUsed|fmt_bytes} ({blockPoolUsedPercent|fmt_percentage})</td>
-            <td>{volfails}</td>
-          </tr>
-          {/nn.LiveNodes}
-          {#nn.DeadNodes}
-          <tr class="danger">
-            <td>{name} ({xferaddr})</td>
-            <td>{lastContact}</td>
-            <td>Dead{?decomissioned}, Decomissioned{/decomissioned}</td>
-            <td>-</td>
-            <td>-</td>
-            <td>-</td>
-            <td>-</td>
-            <td>-</td>
-            <td>-</td>
-            <td>-</td>
-          </tr>
-          {/nn.DeadNodes}
-        </table>
-      </div>
-    </div>
-    <div class="panel panel-default" id="nodelist-decom">
-      <div class="panel-heading">Nodes being decomissioned</div>
-      <div class="panel-body">
-        <table class="table">
-          <thead>
-            <tr>
-              <th>Node</th>
-              <th>Last contact</th>
-              <th>Under replicated blocks</th>
-              <th>Blocks with no live replicas</th>
-              <th>Under Replicated Blocks <br/>In files under construction</th>
-            </tr>
-          </thead>
-          {#nn.DecomNodes}
-          <tr>
-            <td>{name} ({xferaddr})</td>
-            <td>{lastContact}</td>
-            <td>{underReplicatedBlocks}</td>
-            <td>{decommissionOnlyReplicas}</td>
-	    <td>{underReplicateInOpenFiles}</td>
-	  </tr>
-	  {/nn.DecomNodes}
-	</table>
-      </div>
-    </div>
-  </div>
-</div>
+  </tbody>
+</table>
 </script>
 
 <script type="text/javascript" src="/static/jquery-1.10.2.min.js">

+ 113 - 55
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js

@@ -18,10 +18,20 @@
 (function () {
   "use strict";
 
-  var data = {};
+  dust.loadSource(dust.compile($('#tmpl-dfshealth').html(), 'dfshealth'));
+  dust.loadSource(dust.compile($('#tmpl-startup-progress').html(), 'startup-progress'));
+  dust.loadSource(dust.compile($('#tmpl-datanode').html(), 'datanode-info'));
+  dust.loadSource(dust.compile($('#tmpl-snapshot').html(), 'snapshot-info'));
 
-  function render() {
-    var helpers = {
+  function load_overview() {
+    var BEANS = [
+      {"name": "nn",      "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"},
+      {"name": "nnstat",  "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"},
+      {"name": "fs",      "url": "/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState"},
+      {"name": "mem",     "url": "/jmx?qry=java.lang:type=Memory"},
+    ];
+
+    var HELPERS = {
       'helper_fs_max_objects': function (chunk, ctx, bodies, params) {
         var o = ctx.current();
         if (o.MaxObjects > 0) {
@@ -37,35 +47,53 @@
       }
     };
 
-    var base = dust.makeBase(helpers);
+    var data = {};
 
-    dust.loadSource(dust.compile($('#tmpl-dfshealth').html(), 'dfshealth'));
-    dust.render('dfshealth', base.push(data), function(err, out) {
-      $('#panel').html(out);
-    });
-  }
+    // Workarounds for the fact that JMXJsonServlet returns non-standard JSON strings
+    function data_workaround(d) {
+      d.nn.JournalTransactionInfo = JSON.parse(d.nn.JournalTransactionInfo);
+      d.nn.NameJournalStatus = JSON.parse(d.nn.NameJournalStatus);
+      d.nn.NameDirStatuses = JSON.parse(d.nn.NameDirStatuses);
+      d.nn.NodeUsage = JSON.parse(d.nn.NodeUsage);
+      d.nn.CorruptFiles = JSON.parse(d.nn.CorruptFiles);
+      return d;
+    }
 
-  var BEANS = [
-    {"name": "nn",      "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo"},
-    {"name": "nnstat",  "url": "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"},
-    {"name": "fs",      "url": "/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState"},
-    {"name": "mem",     "url": "/jmx?qry=java.lang:type=Memory"},
-    {"name": "startup", "url": "/startupProgress"}
-  ];
-
-  // Workarounds for the fact that JMXJsonServlet returns non-standard JSON strings
-  function data_workaround(d) {
-    function node_map_to_array(nodes) {
-      var res = [];
-      for (var n in nodes) {
-        var p = nodes[n];
-        p.name = n;
-        res.push(p);
-      }
-      return res;
+    load_json(
+      BEANS,
+      function(d) {
+        for (var k in d) {
+          data[k] = d[k].beans[0];
+        }
+        data = data_workaround(data);
+        render();
+      },
+      function (url, jqxhr, text, err) {
+        show_err_msg('<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>');
+      });
+
+    function render() {
+      var base = dust.makeBase(HELPERS);
+      dust.render('dfshealth', base.push(data), function(err, out) {
+        $('#tab-overview').html(out);
+        $('a[href="#tab-datanode"]').click(load_datanode_info);
+        $('#ui-tabs a[href="#tab-overview"]').tab('show');
+      });
     }
+  }
+  $('#ui-tabs a[href="#tab-overview"]').click(load_overview);
 
-    function startup_progress_workaround(r) {
+  function show_err_msg(msg) {
+    $('#alert-panel-body').html(msg);
+    $('#alert-panel').show();
+  }
+
+  function ajax_error_handler(url, jqxhr, text, err) {
+    show_err_msg('<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>');
+  }
+
+  function load_startup_progress() {
+    function workaround(r) {
       function rename_property(o, s, d) {
         if (o[s] !== undefined) {
           o[d] = o[s];
@@ -86,36 +114,66 @@
       });
       return r;
     }
+    $.get('/startupProgress', function (resp) {
+      var data = workaround(resp);
+      dust.render('startup-progress', data, function(err, out) {
+        $('#tab-startup-progress').html(out);
+        $('#ui-tabs a[href="#tab-startup-progress"]').tab('show');
+      });
+    }).error(ajax_error_handler);
+  }
+
+  $('#ui-tabs a[href="#tab-startup-progress"]').click(load_startup_progress);
+
+  function load_datanode_info() {
+    function workaround(r) {
+      function node_map_to_array(nodes) {
+        var res = [];
+        for (var n in nodes) {
+          var p = nodes[n];
+          p.name = n;
+          res.push(p);
+        }
+        return res;
+      }
 
-    d.nn.JournalTransactionInfo = JSON.parse(d.nn.JournalTransactionInfo);
-    d.nn.NameJournalStatus = JSON.parse(d.nn.NameJournalStatus);
-    d.nn.NameDirStatuses = JSON.parse(d.nn.NameDirStatuses);
-    d.nn.NodeUsage = JSON.parse(d.nn.NodeUsage);
-    d.nn.LiveNodes = node_map_to_array(JSON.parse(d.nn.LiveNodes));
-    d.nn.DeadNodes = node_map_to_array(JSON.parse(d.nn.DeadNodes));
-    d.nn.DecomNodes = node_map_to_array(JSON.parse(d.nn.DecomNodes));
-    d.nn.CorruptFiles = JSON.parse(d.nn.CorruptFiles);
-
-    d.fs.SnapshotStats = JSON.parse(d.fs.SnapshotStats);
-    d.startup = startup_progress_workaround(d.startup);
-    return d;
+      r.LiveNodes = node_map_to_array(JSON.parse(r.LiveNodes));
+      r.DeadNodes = node_map_to_array(JSON.parse(r.DeadNodes));
+      r.DecomNodes = node_map_to_array(JSON.parse(r.DecomNodes));
+      return r;
+    }
+
+    $.get('/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo', function (resp) {
+      var data = workaround(resp.beans[0]);
+      dust.render('datanode-info', data, function(err, out) {
+        $('#tab-datanode').html(out);
+        $('#ui-tabs a[href="#tab-datanode"]').tab('show');
+      });
+    }).error(ajax_error_handler);
   }
 
-  function show_err_msg(msg) {
-    $('#alert-panel-body').html(msg);
-    $('#alert-panel').show();
+  $('a[href="#tab-datanode"]').click(load_datanode_info);
+
+  function load_snapshot_info() {
+    $.get('/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState', function (resp) {
+      var data = JSON.parse(resp.beans[0].SnapshotStats);
+      dust.render('snapshot-info', data, function(err, out) {
+        $('#tab-snapshot').html(out);
+        $('#ui-tabs a[href="#tab-snapshot"]').tab('show');
+      });
+    }).error(ajax_error_handler);
   }
 
-  load_json(
-    BEANS,
-    function(d) {
-      for (var k in d) {
-        data[k] = k === "startup" ? d[k] : d[k].beans[0];
-      }
-      data = data_workaround(data);
-      render();
-    },
-    function (url, jqxhr, text, err) {
-      show_err_msg('<p>Failed to retrieve data from ' + url + ', cause: ' + err + '</p>');
-    });
+  $('#ui-tabs a[href="#tab-snapshot"]').click(load_snapshot_info);
+
+  var hash = window.location.hash;
+  if (hash === "#tab-datanode") {
+    load_datanode_info();
+  } else if (hash === "#tab-snapshot") {
+    load_snapshot_info();
+  } else if (hash === "#tab-startup-progress") {
+    load_startup_progress();
+  } else {
+    load_overview();
+  }
 })();

+ 20 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/hadoop.css

@@ -192,4 +192,24 @@ div.security {
 .panel-success > .panel-heading {
   color: #fff !important;
   background-color: #5FA33E !important;
+}
+
+header.bs-docs-nav, header.bs-docs-nav .navbar-brand {
+  border-radius: 0px;
+  background-color: #5fa33e;
+  color: #fff;
+}
+
+#ui-tabs > li > a {
+  color: #dcf0d3;
+}
+
+#ui-tabs .active a {
+  color: #fff;
+  background-color: #446633;
+}
+
+#alert-panel {
+  margin-top:20px;
+  display: none;
 }

+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -285,6 +285,9 @@ Release 2.4.0 - UNRELEASED
 
     MAPREDUCE-5729. mapred job -list throws NPE (kasha)
 
+    MAPREDUCE-5693. Restore MRv1 behavior for log flush (Gera Shegalov via
+    jlowe)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 5 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java

@@ -27,6 +27,7 @@ import java.net.URI;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.concurrent.ScheduledExecutorService;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -61,7 +62,6 @@ import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.util.ConverterUtils;
-import org.apache.log4j.LogManager;
 
 /**
  * The main() for MapReduce task processes.
@@ -123,6 +123,7 @@ class YarnChild {
     LOG.debug("PID: " + System.getenv().get("JVM_PID"));
     Task task = null;
     UserGroupInformation childUGI = null;
+    ScheduledExecutorService logSyncer = null;
 
     try {
       int idleLoopCount = 0;
@@ -161,6 +162,8 @@ class YarnChild {
       // set job classloader if configured before invoking the task
       MRApps.setJobClassLoader(job);
 
+      logSyncer = TaskLog.createLogSyncer();
+
       // Create a final reference to the task for the doAs block
       final Task taskFinal = task;
       childUGI.doAs(new PrivilegedExceptionAction<Object>() {
@@ -214,10 +217,7 @@ class YarnChild {
     } finally {
       RPC.stopProxy(umbilical);
       DefaultMetricsSystem.shutdown();
-      // Shutting down log4j of the child-vm...
-      // This assumes that on return from Task.run()
-      // there is no more logging done.
-      LogManager.shutdown();
+      TaskLog.syncLogsShutdown(logSyncer);
     }
   }
 

+ 10 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java

@@ -31,6 +31,7 @@ import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.commons.io.IOUtils;
@@ -45,6 +46,7 @@ import org.apache.hadoop.mapred.FileOutputCommitter;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.LocalContainerLauncher;
 import org.apache.hadoop.mapred.TaskAttemptListenerImpl;
+import org.apache.hadoop.mapred.TaskLog;
 import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -212,6 +214,7 @@ public class MRAppMaster extends CompositeService {
   boolean errorHappenedShutDown = false;
   private String shutDownMessage = null;
   JobStateInternal forcedState = null;
+  private final ScheduledExecutorService logSyncer;
 
   private long recoveredJobStartTime = 0;
 
@@ -240,6 +243,7 @@ public class MRAppMaster extends CompositeService {
     this.nmHttpPort = nmHttpPort;
     this.metrics = MRAppMetrics.create();
     this.maxAppAttempts = maxAppAttempts;
+    logSyncer = TaskLog.createLogSyncer();
     LOG.info("Created MRAppMaster for application " + applicationAttemptId);
   }
 
@@ -1078,6 +1082,12 @@ public class MRAppMaster extends CompositeService {
     // All components have started, start the job.
     startJobs();
   }
+  
+  @Override
+  public void stop() {
+    super.stop();
+    TaskLog.syncLogsShutdown(logSyncer);
+  }
 
   private void processRecovery() {
     if (appAttemptID.getAttemptId() == 1) {

+ 87 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java

@@ -23,12 +23,17 @@ import java.io.BufferedReader;
 import java.io.DataOutputStream;
 import java.io.File;
 import java.io.FileInputStream;
+import java.io.Flushable;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.util.ArrayList;
 import java.util.Enumeration;
 import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -44,6 +49,8 @@ import org.apache.hadoop.io.SecureIOUtils;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.util.ProcessTree;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.log4j.Appender;
 import org.apache.log4j.LogManager;
@@ -262,7 +269,86 @@ public class TaskLog {
     }
     writeToIndexFile(logLocation, isCleanup);
   }
-  
+
+  public static synchronized void syncLogsShutdown(
+    ScheduledExecutorService scheduler) 
+  {
+    // flush standard streams
+    //
+    System.out.flush();
+    System.err.flush();
+
+    if (scheduler != null) {
+      scheduler.shutdownNow();
+    }
+
+    // flush & close all appenders
+    LogManager.shutdown(); 
+  }
+
+  @SuppressWarnings("unchecked")
+  public static synchronized void syncLogs() {
+    // flush standard streams
+    //
+    System.out.flush();
+    System.err.flush();
+
+    // flush flushable appenders
+    //
+    final Logger rootLogger = Logger.getRootLogger();
+    flushAppenders(rootLogger);
+    final Enumeration<Logger> allLoggers = rootLogger.getLoggerRepository().
+      getCurrentLoggers();
+    while (allLoggers.hasMoreElements()) {
+      final Logger l = allLoggers.nextElement();
+      flushAppenders(l);
+    }
+  }
+
+  @SuppressWarnings("unchecked")
+  private static void flushAppenders(Logger l) {
+    final Enumeration<Appender> allAppenders = l.getAllAppenders();
+    while (allAppenders.hasMoreElements()) {
+      final Appender a = allAppenders.nextElement();
+      if (a instanceof Flushable) {
+        try {
+          ((Flushable) a).flush();
+        } catch (IOException ioe) {
+          System.err.println(a + ": Failed to flush!"
+            + StringUtils.stringifyException(ioe));
+        }
+      }
+    }
+  }
+
+  public static ScheduledExecutorService createLogSyncer() {
+    final ScheduledExecutorService scheduler =
+      Executors.newSingleThreadScheduledExecutor(
+        new ThreadFactory() {
+          @Override
+          public Thread newThread(Runnable r) {
+            final Thread t = Executors.defaultThreadFactory().newThread(r);
+            t.setDaemon(true);
+            t.setName("Thread for syncLogs");
+            return t;
+          }
+        });
+    ShutdownHookManager.get().addShutdownHook(new Runnable() {
+        @Override
+        public void run() {
+          TaskLog.syncLogsShutdown(scheduler);
+        }
+      }, 50);
+    scheduler.scheduleWithFixedDelay(
+        new Runnable() {
+          @Override
+          public void run() {
+            TaskLog.syncLogs();
+          }
+        }, 0L, 5L, TimeUnit.SECONDS);
+    return scheduler;
+  }
+
   /**
    * The filter for userlogs.
    */
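
MAPREDUCE-5693 context: rather than the child JVM calling LogManager.shutdown() directly, callers now create a daemon scheduler via createLogSyncer() that flushes any Flushable appender every five seconds, and tear it down via syncLogsShutdown(). A condensed sketch of that lifecycle (assumed; doWork() is a stand-in for the real task body, and the actual wiring is in the YarnChild and MRAppMaster hunks above):

    import java.util.concurrent.ScheduledExecutorService;
    import org.apache.hadoop.mapred.TaskLog;

    public class LogSyncerLifecycleSketch {
      public static void main(String[] args) {
        // Created once at startup: flushes Flushable log4j appenders every 5 seconds
        // and registers a shutdown hook as a safety net.
        ScheduledExecutorService logSyncer = TaskLog.createLogSyncer();
        try {
          doWork();  // hypothetical workload standing in for Task.run()
        } finally {
          // Replaces a bare LogManager.shutdown(): flush stdout/stderr, stop the
          // scheduler, then flush and close all appenders.
          TaskLog.syncLogsShutdown(logSyncer);
        }
      }

      private static void doWork() { /* illustrative */ }
    }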

+ 3 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapred;
 
+import java.io.Flushable;
 import java.util.LinkedList;
 import java.util.Queue;
 
@@ -31,7 +32,7 @@ import org.apache.log4j.spi.LoggingEvent;
  * 
  */
 @InterfaceStability.Unstable
-public class TaskLogAppender extends FileAppender {
+public class TaskLogAppender extends FileAppender implements Flushable {
   private String taskId; //taskId should be managed as String rather than TaskID object
   //so that log4j can configure it from the configuration(log4j.properties). 
   private Integer maxEvents;
@@ -92,6 +93,7 @@ public class TaskLogAppender extends FileAppender {
     }
   }
   
+  @Override
   public void flush() {
     if (qw != null) {
       qw.flush();

+ 5 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ContainerLogAppender.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn;
 
 import java.io.File;
+import java.io.Flushable;
 import java.util.LinkedList;
 import java.util.Queue;
 
@@ -33,7 +34,9 @@ import org.apache.log4j.spi.LoggingEvent;
  */
 @Public
 @Unstable
-public class ContainerLogAppender extends FileAppender {
+public class ContainerLogAppender extends FileAppender
+  implements Flushable
+{
   private String containerLogDir;
   //so that log4j can configure it from the configuration(log4j.properties). 
   private int maxEvents;
@@ -65,6 +68,7 @@ public class ContainerLogAppender extends FileAppender {
     }
   }
   
+  @Override
   public void flush() {
     if (qw != null) {
       qw.flush();
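
TaskLogAppender and ContainerLogAppender implement java.io.Flushable because TaskLog.syncLogs() only flushes appenders that do; any appender that does not implement it is simply skipped by the periodic sync. A minimal sketch (hypothetical class name) of what a custom log4j appender needs in order to participate:

    import java.io.Flushable;
    import org.apache.log4j.FileAppender;

    public class MyFlushableAppender extends FileAppender implements Flushable {
      @Override
      public void flush() {
        if (qw != null) {   // qw is the QuietWriter field inherited from WriterAppender
          qw.flush();
        }
      }
    }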