
Merge trunk to HDFS-2802 branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1446507 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 12 years ago
Parent
Commit
a9f6a27e93
67 changed files with 1863 insertions(+) and 2543 deletions(-)
  1. 28 9
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 4 0
      hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  3. 45 106
      hadoop-common-project/hadoop-common/pom.xml
  4. 6 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java
  5. 1 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java
  6. 24 16
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
  7. 15 14
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  8. 6 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
  9. 6 1
      hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm
  10. 0 23
      hadoop-common-project/hadoop-common/src/test/ddl/string.jr
  11. 0 63
      hadoop-common-project/hadoop-common/src/test/ddl/test.jr
  12. 14 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSortedMapWritable.java
  13. 0 122
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/FromCpp.java
  14. 0 311
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/RecordBench.java
  15. 0 124
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestBuffer.java
  16. 0 201
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordIO.java
  17. 0 241
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordVersioning.java
  18. 0 115
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/ToCpp.java
  19. 7 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  20. 70 55
      hadoop-hdfs-project/hadoop-hdfs/pom.xml
  21. 16 51
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
  22. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  23. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  24. 56 32
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  25. 23 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  26. 20 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java
  27. 82 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
  28. 13 0
      hadoop-mapreduce-project/CHANGES.txt
  29. 4 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
  30. 14 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
  31. 54 49
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
  32. 167 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
  33. 58 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
  34. 18 51
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
  35. 3 9
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java
  36. 0 18
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
  37. 8 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
  38. 61 65
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java
  39. 1 5
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java
  40. 14 22
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java
  41. 1 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java
  42. 1 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestChainMapReduce.java
  43. 4 18
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java
  44. 86 0
      hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java
  45. 13 0
      hadoop-project/src/site/site.xml
  46. 0 279
      hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestIO.java
  47. 6 0
      hadoop-yarn-project/CHANGES.txt
  48. 21 54
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
  49. 21 55
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
  50. 13 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java
  51. 5 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java
  52. 19 45
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
  53. 17 57
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
  54. 16 16
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java
  55. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/DefaultLCEResourcesHandler.java
  56. 13 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
  57. 60 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java
  58. 39 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java
  59. 5 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
  60. 7 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java
  61. 6 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java
  62. 30 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java
  63. 28 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UsersInfo.java
  64. 40 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java
  65. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
  66. 146 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java
  67. 422 270
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm

+ 28 - 9
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -146,11 +146,10 @@ Trunk (Unreleased)
     HADOOP-9162. Add utility to check native library availability.
     (Binglin Chang via suresh)
 
-    HADOOP-8924. Add maven plugin alternative to shell script to save
-    package-info.java. (Chris Nauroth via suresh)
-
     HADOOP-9277. Improve javadoc for FileContext. (Andrew Wang via suresh)
 
+    HADOOP-9218 Document the Rpc-wrappers used internally (sanjay Radia)
+
   BUG FIXES
 
     HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang)
@@ -321,9 +320,6 @@ Trunk (Unreleased)
     HADOOP-9202. test-patch.sh fails during mvn eclipse:eclipse if patch adds
     a new module to the build (Chris Nauroth via bobby)
 
-    HADOOP-9245. mvn clean without running mvn install before fails.
-    (Karthik Kambatla via suresh)
-
     HADOOP-9249. hadoop-maven-plugins version-info goal causes build failure
     when running with Clover. (Chris Nauroth via suresh)
 
@@ -336,9 +332,6 @@
 
     HADOOP-8589 ViewFs tests fail when tests and home dirs are nested (sanjay Radia)
 
-    HADOOP-9246 Execution phase for hadoop-maven-plugin should be
-    process-resources (Karthik Kambatla and Chris Nauroth via jlowe)
-
     HADOOP-9190. packaging docs is broken. (Andy Isaacson via atm)
 
 Release 2.0.4-beta - UNRELEASED
@@ -354,12 +347,32 @@ Release 2.0.4-beta - UNRELEASED
     HADOOP-9253. Capture ulimit info in the logs at service start time.
     (Arpit Gupta via suresh)
 
+    HADOOP-8924. Add maven plugin alternative to shell script to save
+    package-info.java. (Chris Nauroth via suresh)
+
+    HADOOP-9117. replace protoc ant plugin exec with a maven plugin. (tucu)
+
   OPTIMIZATIONS
 
   BUG FIXES
 
     HADOOP-9294. GetGroupsTestBase fails on Windows. (Chris Nauroth via suresh)
 
+    HADOOP-9305. Add support for running the Hadoop client on 64-bit AIX. (atm)
+
+    HADOOP-9245. mvn clean without running mvn install before fails.
+    (Karthik Kambatla via suresh)
+
+    HADOOP-9246 Execution phase for hadoop-maven-plugin should be
+    process-resources (Karthik Kambatla and Chris Nauroth via jlowe)
+
+    HADOOP-9297. remove old record IO generation and tests. (tucu)
+
+    HADOOP-9154. SortedMapWritable#putAll() doesn't add key/value classes to
+    the map. (Karthik Kambatla via tomwhite)
+
+    HADOOP-9304. remove addition of avro genreated-sources dirs to build. (tucu)
+
 Release 2.0.3-alpha - 2013-02-06 
 
   INCOMPATIBLE CHANGES
@@ -1341,6 +1354,12 @@ Release 0.23.7 - UNRELEASED
 
   BUG FIXES
 
+    HADOOP-9302. HDFS docs not linked from top level (Andy Isaacson via
+    tgraves)
+
+    HADOOP-9303. command manual dfsadmin missing entry for restoreFailedStorage
+    option (Andy Isaacson via tgraves)
+
 Release 0.23.6 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 4 - 0
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -286,6 +286,10 @@
       <!-- protobuf generated code -->
       <Class name="~org\.apache\.hadoop\.security\.proto\.SecurityProtos.*"/>
     </Match>
+    <Match>
+      <!-- protobuf generated code -->
+      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.TestProtos.*"/>
+    </Match>
 
     <!--
        Manually checked, misses child thread manually syncing on parent's intrinsic lock.

+ 45 - 106
hadoop-common-project/hadoop-common/pom.xml

@@ -293,6 +293,51 @@
               </source>
             </configuration>
           </execution>
+          <execution>
+            <id>compile-protoc</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>protoc</goal>
+            </goals>
+            <configuration>
+              <imports>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>HAServiceProtocol.proto</include>
+                  <include>IpcConnectionContext.proto</include>
+                  <include>ProtocolInfo.proto</include>
+                  <include>RpcHeader.proto</include>
+                  <include>ZKFCProtocol.proto</include>
+                  <include>ProtobufRpcEngine.proto</include>
+                  <include>Security.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-sources/java</output>
+            </configuration>
+          </execution>
+          <execution>
+            <id>compile-test-protoc</id>
+            <phase>generate-test-sources</phase>
+            <goals>
+              <goal>protoc</goal>
+            </goals>
+            <configuration>
+              <imports>
+                <param>${basedir}/src/test/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/test/proto</directory>
+                <includes>
+                  <include>test.proto</include>
+                  <include>test_rpc_service.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-test-sources/java</output>
+            </configuration>
+          </execution>
         </executions>
       </plugin>
       <plugin>
@@ -331,39 +376,6 @@
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-antrun-plugin</artifactId>
         <executions>
-          <execution>
-            <id>create-protobuf-generated-sources-directory</id>
-            <phase>initialize</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <mkdir dir="target/generated-sources/java" />
-                <mkdir dir="target/generated-test-sources/java" />
-              </target>
-            </configuration>
-          </execution>
-          <execution>
-            <id>generate-test-sources</id>
-            <phase>generate-test-sources</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-
-                <mkdir dir="${project.build.directory}/generated-test-sources/java"/>
-
-                <taskdef name="recordcc" classname="org.apache.hadoop.record.compiler.ant.RccTask">
-                  <classpath refid="maven.compile.classpath"/>
-                </taskdef>
-                <recordcc destdir="${project.build.directory}/generated-test-sources/java">
-                  <fileset dir="${basedir}/src/test/ddl" includes="**/*.jr"/>
-                </recordcc>
-              </target>
-            </configuration>
-          </execution>
           <execution>
             <id>create-log-dir</id>
             <phase>process-test-resources</phase>
@@ -417,79 +429,6 @@
           </execution>
         </executions>
       </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>compile-proto</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>--java_out=target/generated-sources/java</argument>
-                <argument>src/main/proto/HAServiceProtocol.proto</argument>
-                <argument>src/main/proto/IpcConnectionContext.proto</argument>
-                <argument>src/main/proto/ProtocolInfo.proto</argument>
-                <argument>src/main/proto/RpcHeader.proto</argument>
-                <argument>src/main/proto/ZKFCProtocol.proto</argument>
-                <argument>src/main/proto/ProtobufRpcEngine.proto</argument>
-                <argument>src/main/proto/Security.proto</argument>
-              </arguments>
-            </configuration>
-          </execution>
-          <execution>
-            <id>compile-test-proto</id>
-            <phase>generate-test-sources</phase>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-Isrc/test/proto/</argument>
-                <argument>--java_out=target/generated-test-sources/java</argument>
-                <argument>src/test/proto/test.proto</argument>
-                <argument>src/test/proto/test_rpc_service.proto</argument>
-              </arguments>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-source</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>${project.build.directory}/generated-sources/java</source>
-              </sources>
-            </configuration>
-          </execution>
-          <execution>
-            <id>add-test-source</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-test-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>${project.build.directory}/generated-test-sources/java</source>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
       <plugin>
         <groupId>org.apache.rat</groupId>
         <artifactId>apache-rat-plugin</artifactId>

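The pom.xml hunks above replace the exec-maven-plugin invocation of protoc (and the build-helper source additions) with the protoc goal of the new hadoop-maven-plugins module from HADOOP-9117; the goal itself lives in ProtocMojo.java (file 44 in this change set, not shown in this section). As a rough, hypothetical sketch only, a Maven Mojo that shells out to protoc has the general shape below; the class name, parameter names, and error handling are illustrative assumptions, not the actual ProtocMojo.

package example.protoc; // hypothetical package, not the real org.apache.hadoop.maven.plugin.protoc

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.maven.plugin.AbstractMojo;
import org.apache.maven.plugin.MojoExecutionException;
import org.apache.maven.plugins.annotations.LifecyclePhase;
import org.apache.maven.plugins.annotations.Mojo;
import org.apache.maven.plugins.annotations.Parameter;

/** Hypothetical sketch: runs protoc over the .proto files in one directory. */
@Mojo(name = "protoc", defaultPhase = LifecyclePhase.GENERATE_SOURCES)
public class SimpleProtocMojo extends AbstractMojo {

  /** Directory containing .proto sources (assumed parameter name). */
  @Parameter(required = true)
  private File protoSourceDir;

  /** Directory that receives the generated Java (assumed parameter name). */
  @Parameter(required = true)
  private File output;

  @Override
  public void execute() throws MojoExecutionException {
    output.mkdirs();
    File[] protoFiles = protoSourceDir.listFiles();
    if (protoFiles == null) {
      throw new MojoExecutionException("no proto sources in " + protoSourceDir);
    }
    List<String> command = new ArrayList<String>();
    command.add("protoc");
    command.add("-I" + protoSourceDir.getAbsolutePath());
    command.add("--java_out=" + output.getAbsolutePath());
    for (File proto : protoFiles) {
      if (proto.getName().endsWith(".proto")) {
        command.add(proto.getAbsolutePath());
      }
    }
    try {
      // Run protoc and fail the build if it exits non-zero.
      Process process = new ProcessBuilder(command).inheritIO().start();
      if (process.waitFor() != 0) {
        throw new MojoExecutionException("protoc failed for " + protoSourceDir);
      }
    } catch (IOException e) {
      throw new MojoExecutionException("could not run protoc", e);
    } catch (InterruptedException e) {
      throw new MojoExecutionException("interrupted while running protoc", e);
    }
  }
}
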
+ 6 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/AbstractMapWritable.java

@@ -29,6 +29,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * Abstract base class for MapWritable and SortedMapWritable
  * 
@@ -45,10 +47,12 @@ public abstract class AbstractMapWritable implements Writable, Configurable {
   private AtomicReference<Configuration> conf;
   
   /* Class to id mappings */
-  private Map<Class, Byte> classToIdMap = new ConcurrentHashMap<Class, Byte>();
+  @VisibleForTesting
+  Map<Class, Byte> classToIdMap = new ConcurrentHashMap<Class, Byte>();
   
   /* Id to Class mappings */
-  private Map<Byte, Class> idToClassMap = new ConcurrentHashMap<Byte, Class>();
+  @VisibleForTesting
+  Map<Byte, Class> idToClassMap = new ConcurrentHashMap<Byte, Class>();
   
   /* The number of new classes (those not established by the constructor) */
   private volatile byte newClasses = 0;

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/SortedMapWritable.java

@@ -141,7 +141,7 @@ public class SortedMapWritable extends AbstractMapWritable
     for (Map.Entry<? extends WritableComparable, ? extends Writable> e:
       t.entrySet()) {
       
-      instance.put(e.getKey(), e.getValue());
+      put(e.getKey(), e.getValue());
     }
   }
 

+ 24 - 16
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -62,7 +62,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   
   static { // Register the rpcRequest deserializer for WritableRpcEngine 
     org.apache.hadoop.ipc.Server.registerProtocolEngine(
-        RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcRequestWritable.class,
+        RPC.RpcKind.RPC_PROTOCOL_BUFFER, RpcRequestWrapper.class,
         new Server.ProtoBufRpcInvoker());
   }
 
@@ -122,7 +122,7 @@ public class ProtobufRpcEngine implements RpcEngine {
     public Invoker(Class<?> protocol, Client.ConnectionId connId,
         Configuration conf, SocketFactory factory) {
       this.remoteId = connId;
-      this.client = CLIENTS.getClient(conf, factory, RpcResponseWritable.class);
+      this.client = CLIENTS.getClient(conf, factory, RpcResponseWrapper.class);
       this.protocolName = RPC.getProtocolName(protocol);
       this.clientProtocolVersion = RPC
           .getProtocolVersion(protocol);
@@ -191,7 +191,7 @@ public class ProtobufRpcEngine implements RpcEngine {
       }
 
       RequestProto rpcRequest = constructRpcRequest(method, args);
-      RpcResponseWritable val = null;
+      RpcResponseWrapper val = null;
       
       if (LOG.isTraceEnabled()) {
         LOG.trace(Thread.currentThread().getId() + ": Call -> " +
@@ -199,8 +199,8 @@ public class ProtobufRpcEngine implements RpcEngine {
             " {" + TextFormat.shortDebugString((Message) args[1]) + "}");
       }
       try {
-        val = (RpcResponseWritable) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
-            new RpcRequestWritable(rpcRequest), remoteId);
+        val = (RpcResponseWrapper) client.call(RPC.RpcKind.RPC_PROTOCOL_BUFFER,
+            new RpcRequestWrapper(rpcRequest), remoteId);
 
       } catch (Throwable e) {
         if (LOG.isTraceEnabled()) {
@@ -268,16 +268,20 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   /**
-   * Writable Wrapper for Protocol Buffer Requests
+   * Wrapper for Protocol Buffer Requests
+   * 
+   * Note while this wrapper is writable, the request on the wire is in
+   * Protobuf. Several methods on {@link org.apache.hadoop.ipc.Server and RPC} 
+   * use type Writable as a wrapper to work across multiple RpcEngine kinds.
    */
-  private static class RpcRequestWritable implements Writable {
+  private static class RpcRequestWrapper implements Writable {
     RequestProto message;
 
     @SuppressWarnings("unused")
-    public RpcRequestWritable() {
+    public RpcRequestWrapper() {
     }
 
-    RpcRequestWritable(RequestProto message) {
+    RpcRequestWrapper(RequestProto message) {
       this.message = message;
     }
 
@@ -303,16 +307,20 @@ public class ProtobufRpcEngine implements RpcEngine {
   }
 
   /**
-   * Writable Wrapper for Protocol Buffer Responses
+   *  Wrapper for Protocol Buffer Responses
+   * 
+   * Note while this wrapper is writable, the request on the wire is in
+   * Protobuf. Several methods on {@link org.apache.hadoop.ipc.Server and RPC} 
+   * use type Writable as a wrapper to work across multiple RpcEngine kinds.
    */
-  private static class RpcResponseWritable implements Writable {
+  private static class RpcResponseWrapper implements Writable {
     byte[] responseMessage;
 
     @SuppressWarnings("unused")
-    public RpcResponseWritable() {
+    public RpcResponseWrapper() {
     }
 
-    public RpcResponseWritable(Message message) {
+    public RpcResponseWrapper(Message message) {
       this.responseMessage = message.toByteArray();
     }
 
@@ -336,7 +344,7 @@ public class ProtobufRpcEngine implements RpcEngine {
   @InterfaceStability.Unstable
   static Client getClient(Configuration conf) {
     return CLIENTS.getClient(conf, SocketFactory.getDefault(),
-        RpcResponseWritable.class);
+        RpcResponseWrapper.class);
   }
   
  
@@ -425,7 +433,7 @@ public class ProtobufRpcEngine implements RpcEngine {
        */
       public Writable call(RPC.Server server, String connectionProtocolName,
           Writable writableRequest, long receiveTime) throws Exception {
-        RpcRequestWritable request = (RpcRequestWritable) writableRequest;
+        RpcRequestWrapper request = (RpcRequestWrapper) writableRequest;
         RequestProto rpcRequest = request.message;
         String methodName = rpcRequest.getMethodName();
         
@@ -487,7 +495,7 @@ public class ProtobufRpcEngine implements RpcEngine {
         } catch (Exception e) {
           throw e;
         }
-        return new RpcResponseWritable(result);
+        return new RpcResponseWrapper(result);
       }
     }
   }

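The renamed RpcRequestWrapper/RpcResponseWrapper classes and the new javadoc above describe the pattern this engine relies on: Server and RPC are typed against Writable, so a protobuf payload travels inside a thin Writable envelope even though the bytes on the wire stay in protobuf form. A simplified, self-contained sketch of that envelope pattern (illustrative only, not the actual ProtobufRpcEngine inner classes):

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;

/**
 * Illustrative only: wraps an already-serialized protobuf message so it can
 * travel through APIs that expect a Writable. The payload itself stays in
 * protobuf form; Writable is just the envelope.
 */
public class ProtoBytesWritableWrapper implements Writable {
  private byte[] protoBytes;

  public ProtoBytesWritableWrapper() {          // required for reflective creation
  }

  public ProtoBytesWritableWrapper(byte[] protoBytes) {
    this.protoBytes = protoBytes;
  }

  public byte[] getProtoBytes() {
    return protoBytes;
  }

  @Override
  public void write(DataOutput out) throws IOException {
    WritableUtils.writeVInt(out, protoBytes.length);  // length-prefix the payload
    out.write(protoBytes);
  }

  @Override
  public void readFields(DataInput in) throws IOException {
    int length = WritableUtils.readVInt(in);
    protoBytes = new byte[length];
    in.readFully(protoBytes);
  }
}

The real wrappers carry the RequestProto header or the raw response bytes shown in the diff above; this sketch only demonstrates the Writable-around-protobuf idea.
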
+ 15 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -316,7 +316,8 @@ public class UserGroupInformation {
         return is64Bit ? "com.ibm.security.auth.module.Win64LoginModule"
             : "com.ibm.security.auth.module.NTLoginModule";
       } else if (aix) {
-        return "com.ibm.security.auth.module.AIXLoginModule";
+        return is64Bit ? "com.ibm.security.auth.module.AIX64LoginModule"
+            : "com.ibm.security.auth.module.AIXLoginModule";
       } else {
         return "com.ibm.security.auth.module.LinuxLoginModule";
       }
@@ -331,24 +332,24 @@ public class UserGroupInformation {
   private static Class<? extends Principal> getOsPrincipalClass() {
     ClassLoader cl = ClassLoader.getSystemClassLoader();
     try {
+      String principalClass = null;
       if (ibmJava) {
-        if (windows) {
-          return (Class<? extends Principal>) (is64Bit
-            ? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
-            : cl.loadClass("com.ibm.security.auth.NTUserPrincipal"));
-        } else if (aix) {
-          return (Class<? extends Principal>)
-             cl.loadClass("com.ibm.security.auth.AIXPrincipal");
+        if (is64Bit) {
+          principalClass = "com.ibm.security.auth.UsernamePrincipal";
         } else {
-          return (Class<? extends Principal>) (is64Bit
-            ? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
-            : cl.loadClass("com.ibm.security.auth.LinuxPrincipal"));
+          if (windows) {
+            principalClass = "com.ibm.security.auth.NTUserPrincipal";
+          } else if (aix) {
+            principalClass = "com.ibm.security.auth.AIXPrincipal";
+          } else {
+            principalClass = "com.ibm.security.auth.LinuxPrincipal";
+          }
           }
         } else {
-        return (Class<? extends Principal>) (windows
-           ? cl.loadClass("com.sun.security.auth.NTUserPrincipal")
-           : cl.loadClass("com.sun.security.auth.UnixPrincipal"));
+        principalClass = windows ? "com.sun.security.auth.NTUserPrincipal"
+            : "com.sun.security.auth.UnixPrincipal";
       }
+      return (Class<? extends Principal>) cl.loadClass(principalClass);
     } catch (ClassNotFoundException e) {
       LOG.error("Unable to find JAAS classes:" + e.getMessage());
     }

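The UserGroupInformation hunks add a 64-bit AIX login module (HADOOP-9305) and funnel every vendor/OS-specific principal class name through a single cl.loadClass call. The windows/aix/is64Bit/ibmJava flags being branched on are the kind of values typically derived from standard JVM system properties; the snippet below is an assumed illustration of such detection, not a copy of the fields in UserGroupInformation.java.

// Hypothetical platform-detection flags, in the spirit of the ones branched on above.
public final class PlatformFlags {
  public static final boolean WINDOWS =
      System.getProperty("os.name").startsWith("Windows");
  public static final boolean AIX =
      System.getProperty("os.name").equals("AIX");
  public static final boolean IS_64_BIT =
      System.getProperty("os.arch").contains("64");
  public static final boolean IBM_JAVA =
      System.getProperty("java.vendor").contains("IBM");

  private PlatformFlags() {
  }
}
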
+ 6 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java

@@ -268,7 +268,12 @@ public class GenericOptionsParser {
     }
 
     if (line.hasOption("jt")) {
-      conf.set("mapred.job.tracker", line.getOptionValue("jt"), 
+      String optionValue = line.getOptionValue("jt");
+      if (optionValue.equalsIgnoreCase("local")) {
+        conf.set("mapreduce.framework.name", optionValue);
+      }
+
+      conf.set("yarn.resourcemanager.address", optionValue, 
           "from -jt command line option");
     }
     if (line.hasOption("conf")) {

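With the GenericOptionsParser change above, -jt local selects the local framework through mapreduce.framework.name, and any other -jt value is written to yarn.resourcemanager.address rather than the old mapred.job.tracker key. A small, hypothetical Tool makes it easy to see which keys end up set once ToolRunner has applied the generic options:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/** Hypothetical helper: prints the configuration keys affected by the -jt option. */
public class ShowJtConfig extends Configured implements Tool {

  @Override
  public int run(String[] args) throws Exception {
    Configuration conf = getConf();   // already populated by GenericOptionsParser
    System.out.println("mapreduce.framework.name = "
        + conf.get("mapreduce.framework.name"));
    System.out.println("yarn.resourcemanager.address = "
        + conf.get("yarn.resourcemanager.address"));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // e.g. run with "-jt local" or "-jt rmhost:port" on the command line
    System.exit(ToolRunner.run(new Configuration(), new ShowJtConfig(), args));
  }
}
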
+ 6 - 1
hadoop-common-project/hadoop-common/src/site/apt/CommandsManual.apt.vm

@@ -350,10 +350,11 @@ Administration Commands
 
    Runs a HDFS dfsadmin client.
 
-   Usage: <<<hadoop dfsadmin [GENERIC_OPTIONS] [-report] [-safemode enter | leave | get | wait] [-refreshNodes] [-finalizeUpgrade] [-upgradeProgress status | details | force] [-metasave filename] [-setQuota <quota> <dirname>...<dirname>] [-clrQuota <dirname>...<dirname>] [-help [cmd]]>>>
+   Usage: <<<hadoop dfsadmin [GENERIC_OPTIONS] [-report] [-safemode enter | leave | get | wait] [-refreshNodes] [-finalizeUpgrade] [-upgradeProgress status | details | force] [-metasave filename] [-setQuota <quota> <dirname>...<dirname>] [-clrQuota <dirname>...<dirname>] [-restoreFailedStorage true|false|check] [-help [cmd]]>>>
 
 
 *-----------------+-----------------------------------------------------------+
 || COMMAND_OPTION || Description
+*-----------------+-----------------------------------------------------------+
 | -report         | Reports basic filesystem information and statistics.
 *-----------------+-----------------------------------------------------------+
 | -safemode enter / leave / get / wait | Safe mode maintenance command. Safe
@@ -403,6 +404,10 @@ Administration Commands
                   | 2. user is not an administrator.  It does not fault if the
                   | directory has no quota.
 *-----------------+-----------------------------------------------------------+
+| -restoreFailedStorage true / false / check | This option will turn on/off automatic attempt to restore failed storage replicas.
+                  | If a failed storage becomes available again the system will attempt to restore
+                  | edits and/or fsimage during checkpoint. 'check' option will return current setting.
+*-----------------+-----------------------------------------------------------+
 | -help [cmd]     | Displays help for the given command or all commands if none
                   | is specified.
 *-----------------+-----------------------------------------------------------+

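The CommandsManual addition documents -restoreFailedStorage, which turns the automatic restore of failed storage replicas on or off, with 'check' reporting the current setting. Going only by the usage line added above, invocations would look like the following (illustrative):

hadoop dfsadmin -restoreFailedStorage check
hadoop dfsadmin -restoreFailedStorage true
hadoop dfsadmin -restoreFailedStorage false
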
+ 0 - 23
hadoop-common-project/hadoop-common/src/test/ddl/string.jr

@@ -1,23 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-module org.apache.hadoop.record {
-    class RecString {
-        ustring data;
-    }
-}
-

+ 0 - 63
hadoop-common-project/hadoop-common/src/test/ddl/test.jr

@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-module org.apache.hadoop.record {
-    class RecRecord0 {
-        ustring     stringVal;
-    }
-
-    class RecRecord1 {
-        boolean         boolVal;
-        byte            byteVal;
-        int             intVal;
-        long            longVal;
-        float           floatVal; // testing inline comment
-        double          doubleVal; /* testing comment */
-        ustring         stringVal; /* testing multi-line
-                                    * comment */
-        buffer          bufferVal; // testing another // inline comment 
-        vector<ustring> vectorVal;
-        map<ustring, ustring>   mapVal;
-        RecRecord0      recordVal;
-    }
-    
-  class RecRecordOld {
-  	ustring name;
-  	vector<long> ivec;
-  	vector<vector<RecRecord0>> svec;
-  	RecRecord0 inner;
-  	vector<vector<vector<ustring>>> strvec;
-  	float i1;
-  	map<byte, ustring> map1;
-  	vector<map<int, long>> mvec1;
-  	vector<map<int, long>> mvec2;
-  }
-  
-  /* RecRecordNew is a lot like RecRecordOld. Helps test for versioning. */
-  class RecRecordNew {
-  	ustring name2;
-  	RecRecord0 inner;
-  	vector<int> ivec;
-  	vector<vector<int>> svec;
-  	vector<vector<vector<ustring>>> strvec;
-		int i1;  	
-		map<long, ustring> map1;
-  	vector<map<int, long>> mvec2;	
-  }
-  
-}
-

+ 14 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSortedMapWritable.java

@@ -164,4 +164,18 @@ public class TestSortedMapWritable {
     assertTrue(failureReason, !mapA.equals(mapB));
     assertTrue(failureReason, !mapB.equals(mapA));
   }
+
+  @Test(timeout = 1000)
+  public void testPutAll() {
+    SortedMapWritable map1 = new SortedMapWritable();
+    SortedMapWritable map2 = new SortedMapWritable();
+    map1.put(new Text("key"), new Text("value"));
+    map2.putAll(map1);
+
+    assertEquals("map1 entries don't match map2 entries", map1, map2);
+    assertTrue(
+        "map2 doesn't have class information from map1",
+        map2.classToIdMap.containsKey(Text.class)
+            && map2.idToClassMap.containsValue(Text.class));
+  }
 }

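The new testPutAll verifies that putAll also copies the class-to-id mappings kept by AbstractMapWritable (the maps made @VisibleForTesting earlier in this commit), since those mappings are what allow a map to be written out and read back (HADOOP-9154). A hedged sketch of the kind of round trip that depends on them, using DataOutputBuffer/DataInputBuffer rather than the test above:

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.SortedMapWritable;
import org.apache.hadoop.io.Text;

/** Illustrative round trip: serialization relies on the class/id maps populated by put()/putAll(). */
public class SortedMapWritableRoundTrip {
  public static void main(String[] args) throws Exception {
    SortedMapWritable source = new SortedMapWritable();
    source.put(new Text("key"), new Text("value"));

    SortedMapWritable copy = new SortedMapWritable();
    copy.putAll(source);                      // must also copy the class information

    DataOutputBuffer out = new DataOutputBuffer();
    copy.write(out);                          // uses the class-to-id map for Text

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());

    SortedMapWritable restored = new SortedMapWritable();
    restored.readFields(in);
    System.out.println("restored value: " + restored.get(new Text("key")));
  }
}
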
+ 0 - 122
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/FromCpp.java

@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record;
-
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.TreeMap;
-import junit.framework.*;
-
-/**
- */
-public class FromCpp extends TestCase {
-    
-  public FromCpp(String testName) {
-    super(testName);
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-  }
-    
-  public void testBinary() {
-    File tmpfile;
-    try {
-      tmpfile = new File("/temp/hadooptmp.dat");
-      RecRecord1 r1 = new RecRecord1();
-      r1.setBoolVal(true);
-      r1.setByteVal((byte)0x66);
-      r1.setFloatVal(3.145F);
-      r1.setDoubleVal(1.5234);
-      r1.setIntVal(4567);
-      r1.setLongVal(0x5a5a5a5a5a5aL);
-      r1.setStringVal("random text");
-      r1.setBufferVal(new Buffer());
-      r1.setVectorVal(new ArrayList<String>());
-      r1.setMapVal(new TreeMap<String,String>());
-      FileInputStream istream = new FileInputStream(tmpfile);
-      BinaryRecordInput in = new BinaryRecordInput(istream);
-      RecRecord1 r2 = new RecRecord1();
-      r2.deserialize(in, "");
-      istream.close();
-      assertTrue(r1.equals(r2));
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    } 
-  }
-    
-  public void testCsv() {
-    File tmpfile;
-    try {
-      tmpfile = new File("/temp/hadooptmp.txt");
-      RecRecord1 r1 = new RecRecord1();
-      r1.setBoolVal(true);
-      r1.setByteVal((byte)0x66);
-      r1.setFloatVal(3.145F);
-      r1.setDoubleVal(1.5234);
-      r1.setIntVal(4567);
-      r1.setLongVal(0x5a5a5a5a5a5aL);
-      r1.setStringVal("random text");
-      r1.setBufferVal(new Buffer());
-      r1.setVectorVal(new ArrayList<String>());
-      r1.setMapVal(new TreeMap<String,String>());
-      FileInputStream istream = new FileInputStream(tmpfile);
-      CsvRecordInput in = new CsvRecordInput(istream);
-      RecRecord1 r2 = new RecRecord1();
-      r2.deserialize(in, "");
-      istream.close();
-      assertTrue(r1.equals(r2));
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    } 
-  }
-
-  public void testXml() {
-    File tmpfile;
-    try {
-      tmpfile = new File("/temp/hadooptmp.xml");
-      RecRecord1 r1 = new RecRecord1();
-      r1.setBoolVal(true);
-      r1.setByteVal((byte)0x66);
-      r1.setFloatVal(3.145F);
-      r1.setDoubleVal(1.5234);
-      r1.setIntVal(4567);
-      r1.setLongVal(0x5a5a5a5a5a5aL);
-      r1.setStringVal("random text");
-      r1.setBufferVal(new Buffer());
-      r1.setVectorVal(new ArrayList<String>());
-      r1.setMapVal(new TreeMap<String,String>());
-      FileInputStream istream = new FileInputStream(tmpfile);
-      XmlRecordInput in = new XmlRecordInput(istream);
-      RecRecord1 r2 = new RecRecord1();
-      r2.deserialize(in, "");
-      istream.close();
-      assertTrue(r1.equals(r2));
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    } 
-  }
-
-}

+ 0 - 311
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/RecordBench.java

@@ -1,311 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.util.Random;
-
-/**
- * Benchmark for various types of serializations
- */
-public class RecordBench {
-  
-  private static class Times {
-    long init;
-    long serialize;
-    long deserialize;
-    long write;
-    long readFields;
-  };
-  
-  private static final long SEED = 0xDEADBEEFL;
-  private static final Random rand = new Random();
-  
-  /** Do not allow to create a new instance of RecordBench */
-  private RecordBench() {}
-  
-  private static void initBuffers(Record[] buffers) {
-    final int BUFLEN = 32;
-    for (int idx = 0; idx < buffers.length; idx++) {
-      buffers[idx] = new RecBuffer();
-      int buflen = rand.nextInt(BUFLEN);
-      byte[] bytes = new byte[buflen];
-      rand.nextBytes(bytes);
-      ((RecBuffer)buffers[idx]).setData(new Buffer(bytes));
-    }
-  }
-  
-  private static void initStrings(Record[] strings) {
-    final int STRLEN = 32;
-    for (int idx = 0; idx < strings.length; idx++) {
-      strings[idx] = new RecString();
-      int strlen = rand.nextInt(STRLEN);
-      StringBuilder sb = new StringBuilder(strlen);
-      for (int ich = 0; ich < strlen; ich++) {
-        int cpt = 0;
-        while (true) {
-          cpt = rand.nextInt(0x10FFFF+1);
-          if (Utils.isValidCodePoint(cpt)) {
-            break;
-          }
-        }
-        sb.appendCodePoint(cpt);
-      }
-      ((RecString)strings[idx]).setData(sb.toString());
-    }
-  }
-  
-  private static void initInts(Record[] ints) {
-    for (int idx = 0; idx < ints.length; idx++) {
-      ints[idx] = new RecInt();
-      ((RecInt)ints[idx]).setData(rand.nextInt());
-    }
-  }
-  
-  private static Record[] makeArray(String type, int numRecords, Times times) {
-    Method init = null;
-    try {
-      init = RecordBench.class.getDeclaredMethod("init"+
-                                                 toCamelCase(type) + "s",
-                                                 new Class[] {Record[].class});
-    } catch (NoSuchMethodException ex) {
-      throw new RuntimeException(ex);
-    }
-
-    Record[] records = new Record[numRecords];
-    times.init = System.nanoTime();
-    try {
-      init.invoke(null, new Object[]{records});
-    } catch (Exception ex) {
-      throw new RuntimeException(ex);
-    }
-    times.init = System.nanoTime() - times.init;
-    return records;
-  }
-  
-  private static void runBinaryBench(String type, int numRecords, Times times)
-    throws IOException {
-    Record[] records = makeArray(type, numRecords, times);
-    ByteArrayOutputStream bout = new ByteArrayOutputStream();
-    BinaryRecordOutput rout = new BinaryRecordOutput(bout);
-    DataOutputStream dout = new DataOutputStream(bout);
-    
-    for(int idx = 0; idx < numRecords; idx++) {
-      records[idx].serialize(rout);
-    }
-    bout.reset();
-    
-    times.serialize = System.nanoTime();
-    for(int idx = 0; idx < numRecords; idx++) {
-      records[idx].serialize(rout);
-    }
-    times.serialize = System.nanoTime() - times.serialize;
-    
-    byte[] serialized = bout.toByteArray();
-    ByteArrayInputStream bin = new ByteArrayInputStream(serialized);
-    BinaryRecordInput rin = new BinaryRecordInput(bin);
-    
-    times.deserialize = System.nanoTime();
-    for(int idx = 0; idx < numRecords; idx++) {
-      records[idx].deserialize(rin);
-    }
-    times.deserialize = System.nanoTime() - times.deserialize;
-    
-    bout.reset();
-    
-    times.write = System.nanoTime();
-    for(int idx = 0; idx < numRecords; idx++) {
-      records[idx].write(dout);
-    }
-    times.write = System.nanoTime() - times.write;
-    
-    bin.reset();
-    DataInputStream din = new DataInputStream(bin);
-    
-    times.readFields = System.nanoTime();
-    for(int idx = 0; idx < numRecords; idx++) {
-      records[idx].readFields(din);
-    }
-    times.readFields = System.nanoTime() - times.readFields;
-  }
-  
-  private static void runCsvBench(String type, int numRecords, Times times)
-    throws IOException {
-    Record[] records = makeArray(type, numRecords, times);
-    ByteArrayOutputStream bout = new ByteArrayOutputStream();
-    CsvRecordOutput rout = new CsvRecordOutput(bout);
-    
-    for(int idx = 0; idx < numRecords; idx++) {
-      records[idx].serialize(rout);
-    }
-    bout.reset();
-    
-    times.serialize = System.nanoTime();
-    for(int idx = 0; idx < numRecords; idx++) {
-      records[idx].serialize(rout);
-    }
-    times.serialize = System.nanoTime() - times.serialize;
-    
-    byte[] serialized = bout.toByteArray();
-    ByteArrayInputStream bin = new ByteArrayInputStream(serialized);
-    CsvRecordInput rin = new CsvRecordInput(bin);
-    
-    times.deserialize = System.nanoTime();
-    for(int idx = 0; idx < numRecords; idx++) {
-      records[idx].deserialize(rin);
-    }
-    times.deserialize = System.nanoTime() - times.deserialize;
-  }
-  
-  private static void runXmlBench(String type, int numRecords, Times times)
-    throws IOException {
-    Record[] records = makeArray(type, numRecords, times);
-    ByteArrayOutputStream bout = new ByteArrayOutputStream();
-    XmlRecordOutput rout = new XmlRecordOutput(bout);
-    
-    for(int idx = 0; idx < numRecords; idx++) {
-      records[idx].serialize(rout);
-    }
-    bout.reset();
-    
-    bout.write("<records>\n".getBytes());
-    
-    times.serialize = System.nanoTime();
-    for(int idx = 0; idx < numRecords; idx++) {
-      records[idx].serialize(rout);
-    }
-    times.serialize = System.nanoTime() - times.serialize;
-    
-    bout.write("</records>\n".getBytes());
-    
-    byte[] serialized = bout.toByteArray();
-    ByteArrayInputStream bin = new ByteArrayInputStream(serialized);
-        
-    times.deserialize = System.nanoTime();
-    XmlRecordInput rin = new XmlRecordInput(bin);
-    for(int idx = 0; idx < numRecords; idx++) {
-      records[idx].deserialize(rin);
-    }
-    times.deserialize = System.nanoTime() - times.deserialize;
-  }
-
-  private static void printTimes(String type,
-                                 String format,
-                                 int numRecords,
-                                 Times times) {
-    System.out.println("Type: " + type + " Format: " + format +
-                       " #Records: "+numRecords);
-    if (times.init != 0) {
-      System.out.println("Initialization Time (Per record) : "+
-                         times.init/numRecords + " Nanoseconds");
-    }
-    
-    if (times.serialize != 0) {
-      System.out.println("Serialization Time (Per Record) : "+
-                         times.serialize/numRecords + " Nanoseconds");
-    }
-    
-    if (times.deserialize != 0) {
-      System.out.println("Deserialization Time (Per Record) : "+
-                         times.deserialize/numRecords + " Nanoseconds");
-    }
-    
-    if (times.write != 0) {
-      System.out.println("Write Time (Per Record) : "+
-                         times.write/numRecords + " Nanoseconds");
-    }
-    
-    if (times.readFields != 0) {
-      System.out.println("ReadFields Time (Per Record) : "+
-                         times.readFields/numRecords + " Nanoseconds");
-    }
-    
-    System.out.println();
-  }
-  
-  private static String toCamelCase(String inp) {
-    char firstChar = inp.charAt(0);
-    if (Character.isLowerCase(firstChar)) {
-      return ""+Character.toUpperCase(firstChar) + inp.substring(1);
-    }
-    return inp;
-  }
-  
-  private static void exitOnError() {
-    String usage = "RecordBench {buffer|string|int}"+
-      " {binary|csv|xml} <numRecords>";
-    System.out.println(usage);
-    System.exit(1);
-  }
-  
-  /**
-   * @param args the command line arguments
-   */
-  public static void main(String[] args) throws IOException {
-    String version = "RecordBench v0.1";
-    System.out.println(version+"\n");
-    
-    if (args.length != 3) {
-      exitOnError();
-    }
-    
-    String typeName = args[0];
-    String format = args[1];
-    int numRecords = Integer.decode(args[2]).intValue();
-    
-    Method bench = null;
-    try {
-      bench = RecordBench.class.getDeclaredMethod("run"+
-                                                  toCamelCase(format) + "Bench",
-                                                  new Class[] {String.class, Integer.TYPE, Times.class});
-    } catch (NoSuchMethodException ex) {
-      ex.printStackTrace();
-      exitOnError();
-    }
-    
-    if (numRecords < 0) {
-      exitOnError();
-    }
-    
-    // dry run
-    rand.setSeed(SEED);
-    Times times = new Times();
-    try {
-      bench.invoke(null, new Object[] {typeName, numRecords, times});
-    } catch (Exception ex) {
-      ex.printStackTrace();
-      System.exit(1);
-    }
-    
-    // timed run
-    rand.setSeed(SEED);
-    try {
-      bench.invoke(null, new Object[] {typeName, numRecords, times});
-    } catch (Exception ex) {
-      ex.printStackTrace();
-      System.exit(1);
-    }
-    printTimes(typeName, format, numRecords, times);
-  }
-}

+ 0 - 124
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestBuffer.java

@@ -1,124 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record;
-
-import junit.framework.*;
-
-/**
- * A Unit test for Record I/O Buffer class
- */
-public class TestBuffer extends TestCase {
-  
-  public TestBuffer(String testName) {
-    super(testName);
-  }
-  
-  /**
-   * Test of set method, of class org.apache.hadoop.record.Buffer.
-   */
-  public void testSet() {
-    final byte[] bytes = new byte[10];
-    final Buffer instance = new Buffer();
-    
-    instance.set(bytes);
-    
-    assertEquals("set failed", bytes, instance.get());
-  }
-  
-  /**
-   * Test of copy method, of class org.apache.hadoop.record.Buffer.
-   */
-  public void testCopy() {
-    final byte[] bytes = new byte[10];
-    final int offset = 6;
-    final int length = 3;
-    for (int idx = 0; idx < 10; idx ++) {
-      bytes[idx] = (byte) idx;
-    }
-    final Buffer instance = new Buffer();
-    
-    instance.copy(bytes, offset, length);
-    
-    assertEquals("copy failed", 3, instance.getCapacity());
-    assertEquals("copy failed", 3, instance.get().length);
-    for (int idx = 0; idx < 3; idx++) {
-      assertEquals("Buffer content corrupted", idx+6, instance.get()[idx]);
-    }
-  }
-  
-  /**
-   * Test of getCount method, of class org.apache.hadoop.record.Buffer.
-   */
-  public void testGetCount() {
-    final Buffer instance = new Buffer();
-    
-    final int expResult = 0;
-    final int result = instance.getCount();
-    assertEquals("getSize failed", expResult, result);
-  }
-  
-  /**
-   * Test of getCapacity method, of class org.apache.hadoop.record.Buffer.
-   */
-  public void testGetCapacity() {
-    final Buffer instance = new Buffer();
-    
-    final int expResult = 0;
-    final int result = instance.getCapacity();
-    assertEquals("getCapacity failed", expResult, result);
-    
-    instance.setCapacity(100);
-    assertEquals("setCapacity failed", 100, instance.getCapacity());
-  }
-  
-  /**
-   * Test of truncate method, of class org.apache.hadoop.record.Buffer.
-   */
-  public void testTruncate() {
-    final Buffer instance = new Buffer();
-    instance.setCapacity(100);
-    assertEquals("setCapacity failed", 100, instance.getCapacity());
-    
-    instance.truncate();
-    assertEquals("truncate failed", 0, instance.getCapacity());
-  }
-  
-  /**
-   * Test of append method, of class org.apache.hadoop.record.Buffer.
-   */
-  public void testAppend() {
-    final byte[] bytes = new byte[100];
-    final int offset = 0;
-    final int length = 100;
-    for (int idx = 0; idx < 100; idx++) {
-      bytes[idx] = (byte) (100-idx);
-    }
-    
-    final Buffer instance = new Buffer();
-    
-    instance.append(bytes, offset, length);
-    
-    assertEquals("Buffer size mismatch", 100, instance.getCount());
-    
-    for (int idx = 0; idx < 100; idx++) {
-      assertEquals("Buffer contents corrupted", 100-idx, instance.get()[idx]);
-    }
-    
-  }
-}

+ 0 - 201
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordIO.java

@@ -1,201 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record;
-
-import java.io.IOException;
-import junit.framework.*;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.util.ArrayList;
-import java.util.TreeMap;
-
-/**
- */
-public class TestRecordIO extends TestCase {
-    
-  public TestRecordIO(String testName) {
-    super(testName);
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-  }
-    
-  public void testBinary() {
-    File tmpfile;
-    try {
-      tmpfile = File.createTempFile("hadooprec", ".dat");
-      FileOutputStream ostream = new FileOutputStream(tmpfile);
-      BinaryRecordOutput out = new BinaryRecordOutput(ostream);
-      RecRecord1 r1 = new RecRecord1();
-      r1.setBoolVal(true);
-      r1.setByteVal((byte)0x66);
-      r1.setFloatVal(3.145F);
-      r1.setDoubleVal(1.5234);
-      r1.setIntVal(-4567);
-      r1.setLongVal(-2367L);
-      r1.setStringVal("random text");
-      r1.setBufferVal(new Buffer());
-      r1.setVectorVal(new ArrayList<String>());
-      r1.setMapVal(new TreeMap<String,String>());
-      RecRecord0 r0 = new RecRecord0();
-      r0.setStringVal("other random text");
-      r1.setRecordVal(r0);
-      r1.serialize(out, "");
-      ostream.close();
-      FileInputStream istream = new FileInputStream(tmpfile);
-      BinaryRecordInput in = new BinaryRecordInput(istream);
-      RecRecord1 r2 = new RecRecord1();
-      r2.deserialize(in, "");
-      istream.close();
-      tmpfile.delete();
-      assertTrue("Serialized and deserialized records do not match.", r1.equals(r2));
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    } 
-  }
-    
-  public void testCsv() {
-    File tmpfile;
-    try {
-      tmpfile = File.createTempFile("hadooprec", ".txt");
-      FileOutputStream ostream = new FileOutputStream(tmpfile);
-      CsvRecordOutput out = new CsvRecordOutput(ostream);
-      RecRecord1 r1 = new RecRecord1();
-      r1.setBoolVal(true);
-      r1.setByteVal((byte)0x66);
-      r1.setFloatVal(3.145F);
-      r1.setDoubleVal(1.5234);
-      r1.setIntVal(4567);
-      r1.setLongVal(0x5a5a5a5a5a5aL);
-      r1.setStringVal("random text");
-      r1.setBufferVal(new Buffer());
-      r1.setVectorVal(new ArrayList<String>());
-      r1.setMapVal(new TreeMap<String,String>());
-      RecRecord0 r0 = new RecRecord0();
-      r0.setStringVal("other random text");
-      r1.setRecordVal(r0);
-      r1.serialize(out, "");
-      ostream.close();
-      FileInputStream istream = new FileInputStream(tmpfile);
-      CsvRecordInput in = new CsvRecordInput(istream);
-      RecRecord1 r2 = new RecRecord1();
-      r2.deserialize(in, "");
-      istream.close();
-      tmpfile.delete();
-      assertTrue("Serialized and deserialized records do not match.", r1.equals(r2));
-            
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    }
-  }
-
-  public void testToString() {
-    try {
-      RecRecord1 r1 = new RecRecord1();
-      r1.setBoolVal(true);
-      r1.setByteVal((byte)0x66);
-      r1.setFloatVal(3.145F);
-      r1.setDoubleVal(1.5234);
-      r1.setIntVal(4567);
-      r1.setLongVal(0x5a5a5a5a5a5aL);
-      r1.setStringVal("random text");
-      byte[] barr = new byte[256];
-      for (int idx = 0; idx < 256; idx++) {
-        barr[idx] = (byte) idx;
-      }
-      r1.setBufferVal(new Buffer(barr));
-      r1.setVectorVal(new ArrayList<String>());
-      r1.setMapVal(new TreeMap<String,String>());
-      RecRecord0 r0 = new RecRecord0();
-      r0.setStringVal("other random text");
-      r1.setRecordVal(r0);
-      System.err.println("Illustrating toString bug"+r1.toString());
-      System.err.println("Illustrating toString bug"+r1.toString());
-    } catch (Throwable ex) {
-      assertTrue("Record.toString cannot be invoked twice in succession."+
-                 "This bug has been fixed in the latest version.", false);
-    }
-  }
-    
-  public void testXml() {
-    File tmpfile;
-    try {
-      tmpfile = File.createTempFile("hadooprec", ".xml");
-      FileOutputStream ostream = new FileOutputStream(tmpfile);
-      XmlRecordOutput out = new XmlRecordOutput(ostream);
-      RecRecord1 r1 = new RecRecord1();
-      r1.setBoolVal(true);
-      r1.setByteVal((byte)0x66);
-      r1.setFloatVal(3.145F);
-      r1.setDoubleVal(1.5234);
-      r1.setIntVal(4567);
-      r1.setLongVal(0x5a5a5a5a5a5aL);
-      r1.setStringVal("ran\002dom &lt; %text<&more\uffff");
-      r1.setBufferVal(new Buffer());
-      r1.setVectorVal(new ArrayList<String>());
-      r1.setMapVal(new TreeMap<String,String>());
-      RecRecord0 r0 = new RecRecord0();
-      r0.setStringVal("other %rando\007m &amp; >&more text");
-      r1.setRecordVal(r0);
-      r1.serialize(out, "");
-      ostream.close();
-      FileInputStream istream = new FileInputStream(tmpfile);
-      XmlRecordInput in = new XmlRecordInput(istream);
-      RecRecord1 r2 = new RecRecord1();
-      r2.deserialize(in, "");
-      istream.close();
-      tmpfile.delete();
-      assertTrue("Serialized and deserialized records do not match.", r1.equals(r2));
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    } 
-  }
-    
-  public void testCloneable() {
-    RecRecord1 r1 = new RecRecord1();
-    r1.setBoolVal(true);
-    r1.setByteVal((byte)0x66);
-    r1.setFloatVal(3.145F);
-    r1.setDoubleVal(1.5234);
-    r1.setIntVal(-4567);
-    r1.setLongVal(-2367L);
-    r1.setStringVal("random text");
-    r1.setBufferVal(new Buffer());
-    r1.setVectorVal(new ArrayList<String>());
-    r1.setMapVal(new TreeMap<String,String>());
-    RecRecord0 r0 = new RecRecord0();
-    r0.setStringVal("other random text");
-    r1.setRecordVal(r0);
-    try {
-      RecRecord1 r2 = (RecRecord1) r1.clone();
-      assertTrue("Cloneable semantics violated. r1==r2", r1 != r2);
-      assertTrue("Cloneable semantics violated. r1.getClass() != r2.getClass()",
-                 r1.getClass() == r2.getClass());
-      assertTrue("Cloneable semantics violated. !r2.equals(r1)", r2.equals(r1));
-    } catch (final CloneNotSupportedException ex) {
-      ex.printStackTrace();
-    }
-  }
-}

+ 0 - 241
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/TestRecordVersioning.java

@@ -1,241 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record;
-
-import java.io.IOException;
-import junit.framework.*;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.util.ArrayList;
-import java.util.TreeMap;
-import org.apache.hadoop.record.meta.RecordTypeInfo;
-
-/**
- */
-public class TestRecordVersioning extends TestCase {
-    
-  public TestRecordVersioning(String testName) {
-    super(testName);
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-  }
-    
-  /* 
-   * basic versioning
-   * write out a record and its type info, read it back using its typeinfo
-   */
-  public void testBasic() {
-    File tmpfile, tmpRTIfile;
-    try {
-      tmpfile = File.createTempFile("hadooprec", ".dat");
-      tmpRTIfile = File.createTempFile("hadooprti", ".dat");
-      FileOutputStream ostream = new FileOutputStream(tmpfile);
-      BinaryRecordOutput out = new BinaryRecordOutput(ostream);
-      FileOutputStream oRTIstream = new FileOutputStream(tmpRTIfile);
-      BinaryRecordOutput outRTI = new BinaryRecordOutput(oRTIstream);
-      RecRecord1 r1 = new RecRecord1();
-      r1.setBoolVal(true);
-      r1.setByteVal((byte)0x66);
-      r1.setFloatVal(3.145F);
-      r1.setDoubleVal(1.5234);
-      r1.setIntVal(-4567);
-      r1.setLongVal(-2367L);
-      r1.setStringVal("random text");
-      r1.setBufferVal(new Buffer());
-      r1.setVectorVal(new ArrayList<String>());
-      r1.setMapVal(new TreeMap<String,String>());
-      RecRecord0 r0 = new RecRecord0();
-      r0.setStringVal("other random text");
-      r1.setRecordVal(r0);
-      r1.serialize(out, "");
-      ostream.close();
-      // write out the type info
-      RecRecord1.getTypeInfo().serialize(outRTI);
-      oRTIstream.close();
-      
-      // read
-      FileInputStream istream = new FileInputStream(tmpfile);
-      BinaryRecordInput in = new BinaryRecordInput(istream);
-      FileInputStream iRTIstream = new FileInputStream(tmpRTIfile);
-      BinaryRecordInput inRTI = new BinaryRecordInput(iRTIstream);
-      RecordTypeInfo rti = new RecordTypeInfo();
-      rti.deserialize(inRTI);
-      iRTIstream.close();
-      RecRecord1.setTypeFilter(rti);
-      RecRecord1 r2 = new RecRecord1();
-      r2.deserialize(in, "");
-      istream.close();
-      tmpfile.delete();
-      tmpRTIfile.delete();
-      assertTrue("Serialized and deserialized versioned records do not match.", r1.equals(r2));
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    } 
-  }
-    
-  /* 
-   * versioning
-   * write out a record and its type info, read back a similar record using the written record's typeinfo
-   */
-  public void testVersioning() {
-    File tmpfile, tmpRTIfile;
-    try {
-      tmpfile = File.createTempFile("hadooprec", ".dat");
-      tmpRTIfile = File.createTempFile("hadooprti", ".dat");
-      FileOutputStream ostream = new FileOutputStream(tmpfile);
-      BinaryRecordOutput out = new BinaryRecordOutput(ostream);
-      FileOutputStream oRTIstream = new FileOutputStream(tmpRTIfile);
-      BinaryRecordOutput outRTI = new BinaryRecordOutput(oRTIstream);
-
-      // we create an array of records to write
-      ArrayList<RecRecordOld> recsWrite = new ArrayList<RecRecordOld>();
-      int i, j, k, l;
-      for (i=0; i<5; i++) {
-        RecRecordOld s1Rec = new RecRecordOld();
-
-        s1Rec.setName("This is record s1: " + i);
-
-        ArrayList<Long> iA = new ArrayList<Long>();
-        for (j=0; j<3; j++) {
-          iA.add(new Long(i+j));
-        }
-        s1Rec.setIvec(iA);
-
-        ArrayList<ArrayList<RecRecord0>> ssVec = new ArrayList<ArrayList<RecRecord0>>();
-        for (j=0; j<2; j++) {
-          ArrayList<RecRecord0> sVec = new ArrayList<RecRecord0>();
-          for (k=0; k<3; k++) {
-            RecRecord0 sRec = new RecRecord0("This is record s: ("+j+": "+k+")");
-            sVec.add(sRec);
-          }
-          ssVec.add(sVec);
-        }
-        s1Rec.setSvec(ssVec);
-
-        s1Rec.setInner(new RecRecord0("This is record s: " + i));
-
-        ArrayList<ArrayList<ArrayList<String>>> aaaVec = new ArrayList<ArrayList<ArrayList<String>>>();
-        for (l=0; l<2; l++) {
-          ArrayList<ArrayList<String>> aaVec = new ArrayList<ArrayList<String>>();
-          for (j=0; j<2; j++) {
-            ArrayList<String> aVec = new ArrayList<String>();
-            for (k=0; k<3; k++) {
-              aVec.add(new String("THis is a nested string: (" + l + ": " + j + ": " + k + ")"));
-            }
-            aaVec.add(aVec);
-          }
-          aaaVec.add(aaVec);
-        }
-        s1Rec.setStrvec(aaaVec);
-
-        s1Rec.setI1(100+i);
-
-        java.util.TreeMap<Byte,String> map1 = new java.util.TreeMap<Byte,String>();
-        map1.put(new Byte("23"), "23");
-        map1.put(new Byte("11"), "11");
-        s1Rec.setMap1(map1);
-
-        java.util.TreeMap<Integer,Long> m1 = new java.util.TreeMap<Integer,Long>();
-        java.util.TreeMap<Integer,Long> m2 = new java.util.TreeMap<Integer,Long>();
-        m1.put(new Integer(5), 5L);
-        m1.put(new Integer(10), 10L);
-        m2.put(new Integer(15), 15L);
-        m2.put(new Integer(20), 20L);
-        java.util.ArrayList<java.util.TreeMap<Integer,Long>> vm1 = new java.util.ArrayList<java.util.TreeMap<Integer,Long>>();
-        vm1.add(m1);
-        vm1.add(m2);
-        s1Rec.setMvec1(vm1);
-        java.util.ArrayList<java.util.TreeMap<Integer,Long>> vm2 = new java.util.ArrayList<java.util.TreeMap<Integer,Long>>();
-        vm2.add(m1);
-        s1Rec.setMvec2(vm2);
-
-        // add to our list
-        recsWrite.add(s1Rec);
-      }
-
-      // write out to file
-      for (RecRecordOld rec: recsWrite) {
-        rec.serialize(out);
-      }
-      ostream.close();
-      // write out the type info
-      RecRecordOld.getTypeInfo().serialize(outRTI);
-      oRTIstream.close();
-
-      // read
-      FileInputStream istream = new FileInputStream(tmpfile);
-      BinaryRecordInput in = new BinaryRecordInput(istream);
-      FileInputStream iRTIstream = new FileInputStream(tmpRTIfile);
-      BinaryRecordInput inRTI = new BinaryRecordInput(iRTIstream);
-      RecordTypeInfo rti = new RecordTypeInfo();
-
-      // read type info
-      rti.deserialize(inRTI);
-      iRTIstream.close();
-      RecRecordNew.setTypeFilter(rti);
-
-      // read records
-      ArrayList<RecRecordNew> recsRead = new ArrayList<RecRecordNew>();
-      for (i=0; i<recsWrite.size(); i++) {
-        RecRecordNew s2Rec = new RecRecordNew();
-        s2Rec.deserialize(in);
-        recsRead.add(s2Rec);
-      }
-      istream.close();
-      tmpfile.delete();
-      tmpRTIfile.delete();
-
-      // compare
-      for (i=0; i<recsRead.size(); i++) {
-        RecRecordOld s1Out = recsWrite.get(i);
-        RecRecordNew s2In = recsRead.get(i);
-        assertTrue("Incorrectly read name2 field", null == s2In.getName2());
-        assertTrue("Error comparing inner fields", (0 == s1Out.getInner().compareTo(s2In.getInner())));
-        assertTrue("Incorrectly read ivec field", null == s2In.getIvec());
-        assertTrue("Incorrectly read svec field", null == s2In.getSvec());
-        for (j=0; j<s2In.getStrvec().size(); j++) {
-          ArrayList<ArrayList<String>> ss2Vec = s2In.getStrvec().get(j);
-          ArrayList<ArrayList<String>> ss1Vec = s1Out.getStrvec().get(j);
-          for (k=0; k<ss2Vec.size(); k++) {
-            ArrayList<String> s2Vec = ss2Vec.get(k);
-            ArrayList<String> s1Vec = ss1Vec.get(k);
-            for (l=0; l<s2Vec.size(); l++) {
-              assertTrue("Error comparing strVec fields", (0 == s2Vec.get(l).compareTo(s1Vec.get(l))));
-            }
-          }
-        }
-        assertTrue("Incorrectly read map1 field", null == s2In.getMap1());
-        for (j=0; j<s2In.getMvec2().size(); j++) {
-          assertTrue("Error comparing mvec2 fields", (s2In.getMvec2().get(j).equals(s1Out.getMvec2().get(j))));
-        }
-      }
-
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    } 
-  }
-
-}

+ 0 - 115
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/record/ToCpp.java

@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.record;
-
-import java.io.IOException;
-import junit.framework.*;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.util.ArrayList;
-import java.util.TreeMap;
-
-/**
- */
-public class ToCpp extends TestCase {
-    
-  public ToCpp(String testName) {
-    super(testName);
-  }
-
-  @Override
-  protected void setUp() throws Exception {
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-  }
-    
-  public void testBinary() {
-    File tmpfile;
-    try {
-      tmpfile = new File("/tmp/hadooptemp.dat");
-      FileOutputStream ostream = new FileOutputStream(tmpfile);
-      BinaryRecordOutput out = new BinaryRecordOutput(ostream);
-      RecRecord1 r1 = new RecRecord1();
-      r1.setBoolVal(true);
-      r1.setByteVal((byte)0x66);
-      r1.setFloatVal(3.145F);
-      r1.setDoubleVal(1.5234);
-      r1.setIntVal(4567);
-      r1.setLongVal(0x5a5a5a5a5a5aL);
-      r1.setStringVal("random text");
-      r1.setBufferVal(new Buffer());
-      r1.setVectorVal(new ArrayList<String>());
-      r1.setMapVal(new TreeMap<String,String>());
-      r1.serialize(out, "");
-      ostream.close();
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    } 
-  }
-    
-  public void testCsv() {
-    File tmpfile;
-    try {
-      tmpfile = new File("/tmp/hadooptemp.txt");
-      FileOutputStream ostream = new FileOutputStream(tmpfile);
-      CsvRecordOutput out = new CsvRecordOutput(ostream);
-      RecRecord1 r1 = new RecRecord1();
-      r1.setBoolVal(true);
-      r1.setByteVal((byte)0x66);
-      r1.setFloatVal(3.145F);
-      r1.setDoubleVal(1.5234);
-      r1.setIntVal(4567);
-      r1.setLongVal(0x5a5a5a5a5a5aL);
-      r1.setStringVal("random text");
-      r1.setBufferVal(new Buffer());
-      r1.setVectorVal(new ArrayList<String>());
-      r1.setMapVal(new TreeMap<String,String>());
-      r1.serialize(out, "");
-      ostream.close();
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    } 
-  }
-
-  public void testXml() {
-    File tmpfile;
-    try {
-      tmpfile = new File("/tmp/hadooptemp.xml");
-      FileOutputStream ostream = new FileOutputStream(tmpfile);
-      XmlRecordOutput out = new XmlRecordOutput(ostream);
-      RecRecord1 r1 = new RecRecord1();
-      r1.setBoolVal(true);
-      r1.setByteVal((byte)0x66);
-      r1.setFloatVal(3.145F);
-      r1.setDoubleVal(1.5234);
-      r1.setIntVal(4567);
-      r1.setLongVal(0x5a5a5a5a5a5aL);
-      r1.setStringVal("random text");
-      r1.setBufferVal(new Buffer());
-      r1.setVectorVal(new ArrayList<String>());
-      r1.setMapVal(new TreeMap<String,String>());
-      r1.serialize(out, "");
-      ostream.close();
-    } catch (IOException ex) {
-      ex.printStackTrace();
-    } 
-  }
-}

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -317,6 +317,10 @@ Release 2.0.4-beta - UNRELEASED
    HDFS-4471. Namenode WebUI file browsing does not work with wildcard
    addresses configured. (Andrew Wang via atm)

+    HDFS-4342. Directories configured in dfs.namenode.edits.dir.required
+    but not in dfs.namenode.edits.dir are silently ignored.  (Arpit Agarwal
+    via szetszwo)
+
Release 2.0.3-alpha - 2013-02-06

  INCOMPATIBLE CHANGES
@@ -2292,6 +2296,9 @@ Release 0.23.7 - UNRELEASED

    HDFS-4288. NN accepts incremental BR as IBR in safemode (daryn via kihwal)

+    HDFS-4495. Allow client-side lease renewal to be retried beyond soft-limit
+    (kihwal)
+
Release 0.23.6 - UNRELEASED

  INCOMPATIBLE CHANGES

+ 70 - 55
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -209,7 +209,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            </goals>
            <configuration>
              <compile>false</compile>
-              <workingDirectory>${project.build.directory}/generated-src/main/jsp</workingDirectory>
+              <workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
              <webFragmentFile>${project.build.directory}/hdfs-jsp-servlet-definitions.xml</webFragmentFile>
              <packageName>org.apache.hadoop.hdfs.server.namenode</packageName>
              <sources>
@@ -228,7 +228,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            </goals>
            <configuration>
              <compile>false</compile>
-              <workingDirectory>${project.build.directory}/generated-src/main/jsp</workingDirectory>
+              <workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
              <webFragmentFile>${project.build.directory}/secondary-jsp-servlet-definitions.xml</webFragmentFile>
              <packageName>org.apache.hadoop.hdfs.server.namenode</packageName>
              <sources>
@@ -247,7 +247,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            </goals>
            <configuration>
              <compile>false</compile>
-              <workingDirectory>${project.build.directory}/generated-src/main/jsp</workingDirectory>
+              <workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
              <webFragmentFile>${project.build.directory}/journal-jsp-servlet-definitions.xml</webFragmentFile>
              <packageName>org.apache.hadoop.hdfs.server.journalservice</packageName>
              <sources>
@@ -266,7 +266,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            </goals>
            <configuration>
              <compile>false</compile>
-              <workingDirectory>${project.build.directory}/generated-src/main/jsp</workingDirectory>
+              <workingDirectory>${project.build.directory}/generated-sources/java</workingDirectory>
              <webFragmentFile>${project.build.directory}/datanode-jsp-servlet-definitions.xml</webFragmentFile>
              <packageName>org.apache.hadoop.hdfs.server.datanode</packageName>
              <sources>
@@ -301,7 +301,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
        <artifactId>build-helper-maven-plugin</artifactId>
        <executions>
          <execution>
-            <id>add-source</id>
+            <id>add-jsp-generated-sources-directory</id>
            <phase>generate-sources</phase>
            <goals>
              <goal>add-source</goal>
@@ -309,7 +309,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
            <configuration>
              <sources>
                <source>${project.build.directory}/generated-sources/java</source>
-                <source>${project.build.directory}/generated-src/main/jsp</source>
              </sources>
            </configuration>
          </execution>
@@ -323,14 +322,14 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
        </configuration>
        <executions>
          <execution>
-            <id>create-protobuf-generated-sources-directory</id>
+            <id>create-jsp-generated-sources-directory</id>
            <phase>initialize</phase>
            <goals>
              <goal>run</goal>
            </goals>
            <configuration>
              <target>
-                <mkdir dir="target/generated-sources/java" />
+                <mkdir dir="${project.build.directory}/generated-sources/java" />
              </target>
            </configuration>
          </execution>
@@ -408,80 +407,96 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
        </executions>
      </plugin>
      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
        <executions>
          <execution>
-            <id>compile-proto</id>
+            <id>compile-protoc</id>
            <phase>generate-sources</phase>
            <goals>
-              <goal>exec</goal>
+              <goal>protoc</goal>
            </goals>
            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>--java_out=target/generated-sources/java</argument>
-                <argument>src/main/proto/hdfs.proto</argument>
-                <argument>src/main/proto/GetUserMappingsProtocol.proto</argument>
-                <argument>src/main/proto/HAZKInfo.proto</argument>
-                <argument>src/main/proto/InterDatanodeProtocol.proto</argument>
-                <argument>src/main/proto/JournalProtocol.proto</argument>
-                <argument>src/main/proto/RefreshAuthorizationPolicyProtocol.proto</argument>
-                <argument>src/main/proto/RefreshUserMappingsProtocol.proto</argument>
-                <argument>src/main/proto/datatransfer.proto</argument>
-              </arguments>
+              <imports>
+                <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>GetUserMappingsProtocol.proto</include>
+                  <include>HAZKInfo.proto</include>
+                  <include>InterDatanodeProtocol.proto</include>
+                  <include>JournalProtocol.proto</include>
+                  <include>RefreshAuthorizationPolicyProtocol.proto</include>
+                  <include>RefreshUserMappingsProtocol.proto</include>
+                  <include>datatransfer.proto</include>
+                  <include>hdfs.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-sources/java</output>
            </configuration>
          </execution>
          <execution>
-            <id>compile-proto-datanode</id>
+            <id>compile-protoc-datanode</id>
            <phase>generate-sources</phase>
            <goals>
-              <goal>exec</goal>
+              <goal>protoc</goal>
            </goals>
            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>--java_out=target/generated-sources/java</argument>
-                <argument>src/main/proto/ClientDatanodeProtocol.proto</argument>
-                <argument>src/main/proto/DatanodeProtocol.proto</argument>
-              </arguments>
+              <imports>
+                <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>ClientDatanodeProtocol.proto</include>
+                  <include>DatanodeProtocol.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-sources/java</output>
            </configuration>
          </execution>
          <execution>
-            <id>compile-proto-namenode</id>
+            <id>compile-protoc-namenode</id>
            <phase>generate-sources</phase>
            <goals>
-              <goal>exec</goal>
+              <goal>protoc</goal>
            </goals>
            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>--java_out=target/generated-sources/java</argument>
-                <argument>src/main/proto/ClientNamenodeProtocol.proto</argument>
-                <argument>src/main/proto/NamenodeProtocol.proto</argument>
-              </arguments>
+              <imports>
+                <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>ClientNamenodeProtocol.proto</include>
+                  <include>NamenodeProtocol.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-sources/java</output>
            </configuration>
          </execution>
          <execution>
-            <id>compile-proto-qjournal</id>
+            <id>compile-protoc-qjournal</id>
            <phase>generate-sources</phase>
            <goals>
-              <goal>exec</goal>
+              <goal>protoc</goal>
            </goals>
            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-I../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>--java_out=target/generated-sources/java</argument>
-                <argument>src/main/proto/QJournalProtocol.proto</argument>
-              </arguments>
+              <imports>
+                <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>QJournalProtocol.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-sources/java</output>
            </configuration>
          </execution>
        </executions>

+ 16 - 51
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml

@@ -92,63 +92,28 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
   <build>
  <build>
    <plugins>
      <plugin>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-source</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>${project.build.directory}/generated-sources/java</source>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <configuration>
-          <skipTests>false</skipTests>
-        </configuration>
-        <executions>
-          <execution>
-            <id>create-protobuf-generated-sources-directory</id>
-            <phase>initialize</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-            <configuration>
-              <target>
-                <mkdir dir="target/generated-sources/java" />
-              </target>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
        <executions>
          <execution>
-            <id>compile-proto</id>
+            <id>compile-protoc</id>
            <phase>generate-sources</phase>
            <goals>
-              <goal>exec</goal>
+              <goal>protoc</goal>
            </goals>
            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-I../../../../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>-I../../main/proto</argument>
-                <argument>--java_out=target/generated-sources/java</argument>
-                <argument>src/main/proto/bkjournal.proto</argument>
-              </arguments>
+              <imports>
+                <param>${basedir}/../../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
+                <param>${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto</param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>bkjournal.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-sources/java</output>
            </configuration>
          </execution>
        </executions>

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -623,10 +623,10 @@ public class DFSClient implements java.io.Closeable {
      } catch (IOException e) {
        // Abort if the lease has already expired. 
        final long elapsed = Time.now() - getLastLeaseRenewal();
-        if (elapsed > HdfsConstants.LEASE_SOFTLIMIT_PERIOD) {
+        if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
          LOG.warn("Failed to renew lease for " + clientName + " for "
              + (elapsed/1000) + " seconds (>= soft-limit ="
-              + (HdfsConstants.LEASE_SOFTLIMIT_PERIOD/1000) + " seconds.) "
+              + (HdfsConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
              + "Closing all files being written ...", e);
          closeAllFilesBeingWritten(true);
        } else {
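
Note: the DFSClient hunk above (HDFS-4495) moves the abort check from the lease
soft limit to the hard limit, so a client that temporarily cannot reach the
NameNode keeps retrying lease renewal between the two limits instead of closing
every file it is writing after roughly a minute. A minimal sketch of that
decision, assuming stand-in constants rather than the real HdfsConstants fields:

// Simplified sketch of the retry-vs-abort decision after HDFS-4495; the
// constants stand in for HdfsConstants.LEASE_SOFTLIMIT_PERIOD (one minute)
// and HdfsConstants.LEASE_HARDLIMIT_PERIOD (one hour).
final class LeaseRenewalPolicy {
  static final long SOFT_LIMIT_MS = 60L * 1000;
  static final long HARD_LIMIT_MS = 60L * 60 * 1000;

  /** True when the client should stop retrying and close its open files. */
  static boolean shouldAbort(long lastRenewalMs, long nowMs) {
    final long elapsed = nowMs - lastRenewalMs;
    // Before the patch this compared against SOFT_LIMIT_MS, which aborted all
    // writers after a transient outage of about a minute; now renewal keeps
    // being retried until the hard limit has passed.
    return elapsed > HARD_LIMIT_MS;
  }
}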

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -235,6 +235,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String  DFS_NAMENODE_SHARED_EDITS_DIR_KEY = "dfs.namenode.shared.edits.dir";
  public static final String  DFS_NAMENODE_EDITS_PLUGIN_PREFIX = "dfs.namenode.edits.journal-plugin";
  public static final String  DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY = "dfs.namenode.edits.dir.required";
+  public static final String  DFS_NAMENODE_EDITS_DIR_DEFAULT = "file:///tmp/hadoop/dfs/name";
  public static final String  DFS_CLIENT_READ_PREFETCH_SIZE_KEY = "dfs.client.read.prefetch.size"; 
  public static final String  DFS_CLIENT_RETRY_WINDOW_BASE= "dfs.client.retry.window.base";
  public static final String  DFS_METRICS_SESSION_ID_KEY = "dfs.metrics.session-id";

+ 56 - 32
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -127,6 +127,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -431,51 +432,73 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
  }
  
  /**
-
-  /**
-   * Instantiates an FSNamesystem loaded from the image and edits
-   * directories specified in the passed Configuration.
-   * 
-   * @param conf the Configuration which specifies the storage directories
-   *             from which to load
-   * @return an FSNamesystem which contains the loaded namespace
-   * @throws IOException if loading fails
+   * Check the supplied configuration for correctness.
+   * @param conf Supplies the configuration to validate.
+   * @throws IOException if the configuration could not be queried.
+   * @throws IllegalArgumentException if the configuration is invalid.
   */
-  public static FSNamesystem loadFromDisk(Configuration conf)
+  private static void checkConfiguration(Configuration conf)
      throws IOException {
-    Collection<URI> namespaceDirs = FSNamesystem.getNamespaceDirs(conf);
-    List<URI> namespaceEditsDirs = 
-      FSNamesystem.getNamespaceEditsDirs(conf);
-    return loadFromDisk(conf, namespaceDirs, namespaceEditsDirs);
-  }
 
 
-  /**
-   * Instantiates an FSNamesystem loaded from the image and edits
-   * directories passed.
-   * 
-   * @param conf the Configuration which specifies the storage directories
-   *             from which to load
-   * @param namespaceDirs directories to load the fsimages
-   * @param namespaceEditsDirs directories to load the edits from
-   * @return an FSNamesystem which contains the loaded namespace
-   * @throws IOException if loading fails
-   */
-  public static FSNamesystem loadFromDisk(Configuration conf,
-      Collection<URI> namespaceDirs, List<URI> namespaceEditsDirs)
-      throws IOException {
+    final Collection<URI> namespaceDirs =
+        FSNamesystem.getNamespaceDirs(conf);
+    final Collection<URI> editsDirs =
+        FSNamesystem.getNamespaceEditsDirs(conf);
+    final Collection<URI> requiredEditsDirs =
+        FSNamesystem.getRequiredNamespaceEditsDirs(conf);
+    final Collection<URI> sharedEditsDirs =
+        FSNamesystem.getSharedEditsDirs(conf);
+
+    for (URI u : requiredEditsDirs) {
+      if (u.toString().compareTo(
+              DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT) == 0) {
+        continue;
+      }
+
+      // Each required directory must also be in editsDirs or in
+      // sharedEditsDirs.
+      if (!editsDirs.contains(u) &&
+          !sharedEditsDirs.contains(u)) {
+        throw new IllegalArgumentException(
+            "Required edits directory " + u.toString() + " not present in " +
+            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + ". " +
+            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY + "=" +
+            editsDirs.toString() + "; " +
+            DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY + "=" +
+            requiredEditsDirs.toString() + ". " +
+            DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY + "=" +
+            sharedEditsDirs.toString() + ".");
+      }
+    }
 
 
    if (namespaceDirs.size() == 1) {
      LOG.warn("Only one image storage directory ("
          + DFS_NAMENODE_NAME_DIR_KEY + ") configured. Beware of dataloss"
          + " due to lack of redundant storage directories!");
    }
-    if (namespaceEditsDirs.size() == 1) {
+    if (editsDirs.size() == 1) {
      LOG.warn("Only one namespace edits storage directory ("
          + DFS_NAMENODE_EDITS_DIR_KEY + ") configured. Beware of dataloss"
          + " due to lack of redundant storage directories!");
    }
+  }
+
+  /**
+   * Instantiates an FSNamesystem loaded from the image and edits
+   * directories specified in the passed Configuration.
+   *
+   * @param conf the Configuration which specifies the storage directories
+   *             from which to load
+   * @return an FSNamesystem which contains the loaded namespace
+   * @throws IOException if loading fails
+   */
+  public static FSNamesystem loadFromDisk(Configuration conf)
+      throws IOException {
 
 
-    FSImage fsImage = new FSImage(conf, namespaceDirs, namespaceEditsDirs);
+    checkConfiguration(conf);
+    FSImage fsImage = new FSImage(conf,
+        FSNamesystem.getNamespaceDirs(conf),
+        FSNamesystem.getNamespaceEditsDirs(conf));
    FSNamesystem namesystem = new FSNamesystem(conf, fsImage);
    StartupOption startOpt = NameNode.getStartupOption(conf);
    if (startOpt == StartupOption.RECOVER) {
@@ -923,7 +946,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           "\n\t\t- use Backup Node as a persistent and up-to-date storage " +
           "\n\t\t- use Backup Node as a persistent and up-to-date storage " +
           "of the file system meta-data.");
           "of the file system meta-data.");
    } else if (dirNames.isEmpty()) {
-      dirNames = Collections.singletonList("file:///tmp/hadoop/dfs/name");
+      dirNames = Collections.singletonList(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_DEFAULT);
    }
    return Util.stringCollectionAsURIs(dirNames);
  }
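
Note: the new FSNamesystem.checkConfiguration() above enforces HDFS-4342: every
URI listed in dfs.namenode.edits.dir.required must also appear in
dfs.namenode.edits.dir or dfs.namenode.shared.edits.dir, otherwise the NameNode
refuses to start with an IllegalArgumentException instead of silently ignoring
the required directory. A self-contained restatement of that rule, with an
illustrative class name and without the default-directory exemption shown in
the hunk:

import java.net.URI;
import java.util.Collection;

// Illustrative restatement of the HDFS-4342 check; the real validation lives
// in FSNamesystem.checkConfiguration and also skips the built-in default dir.
final class RequiredEditsDirsCheck {
  static void validate(Collection<URI> editsDirs,
                       Collection<URI> sharedEditsDirs,
                       Collection<URI> requiredEditsDirs) {
    for (URI dir : requiredEditsDirs) {
      // A required directory that is neither a local nor a shared edits
      // directory would otherwise be silently ignored, so fail fast instead.
      if (!editsDirs.contains(dir) && !sharedEditsDirs.contains(dir)) {
        throw new IllegalArgumentException("Required edits directory " + dir
            + " is not listed in dfs.namenode.edits.dir or "
            + "dfs.namenode.shared.edits.dir");
      }
    }
  }
}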

+ 23 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -78,6 +78,7 @@ import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
 
@@ -780,6 +781,26 @@ public class NameNode {
    return initializeSharedEdits(conf, force, false);
  }
 
 
+  /**
+   * Clone the supplied configuration but remove the shared edits dirs.
+   *
+   * @param conf Supplies the original configuration.
+   * @return Cloned configuration without the shared edit dirs.
+   * @throws IOException on failure to generate the configuration.
+   */
+  private static Configuration getConfigurationWithoutSharedEdits(
+      Configuration conf)
+      throws IOException {
+    List<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(conf, false);
+    String editsDirsString = Joiner.on(",").join(editsDirs);
+
+    Configuration confWithoutShared = new Configuration(conf);
+    confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
+    confWithoutShared.setStrings(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+        editsDirsString);
+    return confWithoutShared;
+  }
+
  /**
   * Format a new shared edits dir and copy in enough edit log segments so that
   * the standby NN can start up.
@@ -809,11 +830,8 @@ public class NameNode {
 
 
    NNStorage existingStorage = null;
    try {
-      Configuration confWithoutShared = new Configuration(conf);
-      confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
-      FSNamesystem fsns = FSNamesystem.loadFromDisk(confWithoutShared,
-          FSNamesystem.getNamespaceDirs(conf),
-          FSNamesystem.getNamespaceEditsDirs(conf, false));
+      FSNamesystem fsns =
+          FSNamesystem.loadFromDisk(getConfigurationWithoutSharedEdits(conf));
       
       
      existingStorage = fsns.getFSImage().getStorage();
      NamespaceInfo nsInfo = existingStorage.getNamespaceInfo();
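
Note: getConfigurationWithoutSharedEdits() above copies the Configuration,
unsets dfs.namenode.shared.edits.dir, and rewrites dfs.namenode.edits.dir to
the non-shared list before calling loadFromDisk(), so the stricter
checkConfiguration() validation is not tripped while the shared edits dir is
being formatted. A hedged sketch of the general clone-and-unset pattern; the
helper name is hypothetical, while Configuration(Configuration) and
unset(String) are the same Hadoop calls the hunk uses:

import org.apache.hadoop.conf.Configuration;

// Hypothetical helper showing the clone-and-unset pattern from the hunk above.
final class ConfCloner {
  static Configuration withoutKey(Configuration base, String key) {
    Configuration copy = new Configuration(base); // copy constructor clones the properties
    copy.unset(key);                              // drop the key from the copy only
    return copy;                                  // the original Configuration is untouched
  }
}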

+ 20 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLease.java

@@ -41,6 +41,7 @@ import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -93,9 +94,26 @@ public class TestLease {
 
 
      // We don't need to wait the lease renewer thread to act.
      // call renewLease() manually.
-      // make it look like lease has already expired.
+      // make it look like the soft limit has been exceeded.
      LeaseRenewer originalRenewer = dfs.getLeaseRenewer();
-      dfs.lastLeaseRenewal = Time.now() - 300000;
+      dfs.lastLeaseRenewal = Time.now()
+      - HdfsConstants.LEASE_SOFTLIMIT_PERIOD - 1000;
+      try {
+        dfs.renewLease();
+      } catch (IOException e) {}
+
+      // Things should continue to work it passes hard limit without
+      // renewing.
+      try {
+        d_out.write(buf, 0, 1024);
+        LOG.info("Write worked beyond the soft limit as expected.");
+      } catch (IOException e) {
+        Assert.fail("Write failed.");
+      }
+
+      // make it look like the hard limit has been exceeded.
+      dfs.lastLeaseRenewal = Time.now()
+      - HdfsConstants.LEASE_HARDLIMIT_PERIOD - 1000;
      dfs.renewLease();

      // this should not work.

+ 82 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java

@@ -308,6 +308,88 @@ public class TestNameEditsConfigs {
        new File(storageDir, "current"), NameNodeDirType.IMAGE_AND_EDITS);
  }
 
 
+  /**
+   * Test edits.dir.required configuration options.
+   * 1. Directory present in dfs.namenode.edits.dir.required but not in
+   *    dfs.namenode.edits.dir. Expected to fail.
+   * 2. Directory present in both dfs.namenode.edits.dir.required and
+   *    dfs.namenode.edits.dir. Expected to succeed.
+   * 3. Directory present only in dfs.namenode.edits.dir. Expected to
+   *    succeed.
+   */
+  @Test
+  public void testNameEditsRequiredConfigs() throws IOException {
+    MiniDFSCluster cluster = null;
+    File nameAndEditsDir = new File(base_dir, "name_and_edits");
+    File nameAndEditsDir2 = new File(base_dir, "name_and_edits2");
+
+    // 1
+    // Bad configuration. Add a directory to dfs.namenode.edits.dir.required
+    // without adding it to dfs.namenode.edits.dir.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
+          nameAndEditsDir2.toURI().toString());
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+      fail("Successfully started cluster but should not have been able to.");
+    } catch (IllegalArgumentException iae) { // expect to fail
+      LOG.info("EXPECTED: cluster start failed due to bad configuration" + iae);
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      cluster = null;
+    }
+
+    // 2
+    // Good configuration. Add a directory to both dfs.namenode.edits.dir.required
+    // and dfs.namenode.edits.dir.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setStrings(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString(),
+          nameAndEditsDir2.toURI().toString());
+      conf.set(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
+          nameAndEditsDir2.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+
+    // 3
+    // Good configuration. Adds a directory to dfs.namenode.edits.dir but not to
+    // dfs.namenode.edits.dir.required.
+    try {
+      Configuration conf = new HdfsConfiguration();
+      conf.setStrings(
+          DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
+          nameAndEditsDir.toURI().toString(),
+          nameAndEditsDir2.toURI().toString());
+      cluster = new MiniDFSCluster.Builder(conf)
+          .numDataNodes(NUM_DATA_NODES)
+          .manageNameDfsDirs(false)
+          .build();
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
  /**
   * Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
   * This test tries to simulate failure scenarios.
+ 13 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -167,6 +167,13 @@ Release 2.0.4-beta - UNRELEASED
     MAPREDUCE-4671. AM does not tell the RM about container requests which are
     no longer needed. (Bikas Saha via sseth)
 
+    MAPREDUCE-4994. -jt generic command line option does not work. (sandyr via tucu)
+
+    MAPREDUCE-5000. Fixes getCounters when speculating by fixing the selection
+    of the best attempt for a task. (Jason Lowe via sseth)
+
+    MAPREDUCE-4994. Addendum fixing testcases failures. (sandyr via tucu)
+
 Release 2.0.3-alpha - 2013-02-06 
 
   INCOMPATIBLE CHANGES
@@ -694,6 +701,9 @@ Release 0.23.7 - UNRELEASED
     MAPREDUCE-4905. test org.apache.hadoop.mapred.pipes 
     (Aleksey Gorshkov via bobby)
 
+    MAPREDUCE-4989. JSONify DataTables input data for Attempts page (Ravi
+    Prakash via jlowe)
+
   OPTIMIZATIONS
 
     MAPREDUCE-4946. Fix a performance problem for large jobs by reducing the
@@ -707,6 +717,9 @@ Release 0.23.7 - UNRELEASED
     MAPREDUCE-4458. Warn if java.library.path is used for AM or Task
     (Robert Parker via jeagles)
 
+    MAPREDUCE-4992. AM hangs in RecoveryService when recovering tasks with
+    speculative attempts (Robert Parker via jlowe)
+
 Release 0.23.6 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 4 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java

@@ -539,6 +539,10 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
   //select the nextAttemptNumber with best progress
   // always called inside the Read Lock
   private TaskAttempt selectBestAttempt() {
+    if (successfulAttempt != null) {
+      return attempts.get(successfulAttempt);
+    }
+
     float progress = 0f;
     TaskAttempt result = null;
     for (TaskAttempt at : attempts.values()) {
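Taken together with the MAPREDUCE-5000 entry in CHANGES.txt, the intent of this hunk is that once a task has a successful attempt, that attempt (and therefore its counters) is what the task reports, rather than whichever attempt shows the most progress. A rough consolidated sketch of the patched method; the body of the progress loop is assumed, since only its header appears in the hunk:

  // Sketch only: the loop body below is an assumption, not part of the diff.
  private TaskAttempt selectBestAttempt() {
    if (successfulAttempt != null) {
      return attempts.get(successfulAttempt);   // speculative case: prefer the attempt that won
    }
    float progress = 0f;
    TaskAttempt result = null;
    for (TaskAttempt at : attempts.values()) {
      if (result == null || at.getProgress() > progress) {   // assumed comparison
        result = at;
        progress = at.getProgress();
      }
    }
    return result;
  }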

+ 14 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java

@@ -21,9 +21,12 @@ package org.apache.hadoop.mapreduce.v2.app.recover;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -35,6 +38,7 @@ import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
@@ -108,7 +112,7 @@ public class RecoveryService extends CompositeService implements Recovery {
   private JobInfo jobInfo = null;
   private final Map<TaskId, TaskInfo> completedTasks =
     new HashMap<TaskId, TaskInfo>();
-
+  
   private final List<TaskEvent> pendingTaskScheduleEvents =
     new ArrayList<TaskEvent>();
 
@@ -193,6 +197,14 @@ public class RecoveryService extends CompositeService implements Recovery {
         .getAllTasks();
     for (TaskInfo taskInfo : taskInfos.values()) {
       if (TaskState.SUCCEEDED.toString().equals(taskInfo.getTaskStatus())) {
+        Iterator<Entry<TaskAttemptID, TaskAttemptInfo>> taskAttemptIterator = 
+            taskInfo.getAllTaskAttempts().entrySet().iterator();
+        while (taskAttemptIterator.hasNext()) {
+          Map.Entry<TaskAttemptID, TaskAttemptInfo> currentEntry = taskAttemptIterator.next();
+          if (!jobInfo.getAllCompletedTaskAttempts().containsKey(currentEntry.getKey())) {
+            taskAttemptIterator.remove();
+          }
+        }
         completedTasks
             .put(TypeConverter.toYarn(taskInfo.getTaskId()), taskInfo);
         LOG.info("Read from history task "
@@ -215,6 +227,7 @@ public class RecoveryService extends CompositeService implements Recovery {
         JobHistoryUtils.getConfiguredHistoryStagingDirPrefix(conf, jobId);
     Path histDirPath =
         FileContext.getFileContext(conf).makeQualified(new Path(jobhistoryDir));
+    LOG.info("Trying file " + histDirPath.toString());
     FileContext fc = FileContext.getFileContext(histDirPath.toUri(), conf);
     // read the previous history file
     historyFile =
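The filtering loop added above is the MAPREDUCE-4992 fix: attempt records that never reached a completion event in the previous AM generation (for example a speculative attempt that was still running at the crash) are dropped before the task is registered in completedTasks, so recovery no longer hangs waiting for them. Stripped of the surrounding RecoveryService code, the idiom is ordinary iterator-based map filtering; the variable names here are illustrative:

    // Illustrative sketch of the filtering idiom used above.
    Map<TaskAttemptID, TaskAttemptInfo> attempts = taskInfo.getAllTaskAttempts();
    Map<TaskAttemptID, TaskAttemptInfo> completed = jobInfo.getAllCompletedTaskAttempts();
    Iterator<Map.Entry<TaskAttemptID, TaskAttemptInfo>> it = attempts.entrySet().iterator();
    while (it.hasNext()) {
      if (!completed.containsKey(it.next().getKey())) {
        it.remove();   // attempt never completed in the previous generation; skip it on recovery
      }
    }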

+ 54 - 49
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java

@@ -27,18 +27,11 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 
 
 import java.util.Collection;
 
+import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
 import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.SubView;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TD;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
 import com.google.inject.Inject;
@@ -60,7 +53,7 @@ public class TaskPage extends AppView {
           h2($(TITLE));
         return;
       }
-      TBODY<TABLE<Hamlet>> tbody = html.
+      html.
       table("#attempts").
         thead().
           tr().
@@ -72,49 +65,46 @@ public class TaskPage extends AppView {
             th(".tsh", "Started").
             th(".tsh", "Finished").
             th(".tsh", "Elapsed").
-            th(".note", "Note")._()._().
-        tbody();
+            th(".note", "Note")._()._();
+      // Write all the data into a JavaScript array of arrays for JQuery
+      // DataTables to display
+      StringBuilder attemptsTableData = new StringBuilder("[\n");
+
       for (TaskAttempt attempt : getTaskAttempts()) {
         TaskAttemptInfo ta = new TaskAttemptInfo(attempt, true);
-        String taid = ta.getId();
         String progress = percent(ta.getProgress() / 100);
-        ContainerId containerId = ta.getAssignedContainerId();
 
         String nodeHttpAddr = ta.getNode();
-        long startTime = ta.getStartTime();
-        long finishTime = ta.getFinishTime();
-        long elapsed = ta.getElapsedTime();
         String diag = ta.getNote() == null ? "" : ta.getNote();
-        TR<TBODY<TABLE<Hamlet>>> row = tbody.tr();
-        TD<TR<TBODY<TABLE<Hamlet>>>> nodeTd = row.
-          td(".id", taid).
-          td(".progress", progress).
-          td(".state", ta.getState()).td();
-        if (nodeHttpAddr == null) {
-          nodeTd._("N/A");
-        } else {
-          nodeTd.
-            a(".nodelink", url(HttpConfig.getSchemePrefix(),
-                               nodeHttpAddr), nodeHttpAddr);
-        }
-        nodeTd._();
-        if (containerId != null) {
-          String containerIdStr = ta.getAssignedContainerIdStr();
-          row.td().
-              a(".logslink", url(HttpConfig.getSchemePrefix(),
-              nodeHttpAddr, "node", "containerlogs",
-              containerIdStr, app.getJob().getUserName()), "logs")._();
-        } else {
-          row.td()._("N/A")._();
-        }
-
-        row.
-          td(".ts", Times.format(startTime)).
-          td(".ts", Times.format(finishTime)).
-          td(".dt", StringUtils.formatTime(elapsed)).
-          td(".note", diag)._();
+        attemptsTableData.append("[\"")
+        .append(ta.getId()).append("\",\"")
+        .append(progress).append("\",\"")
+        .append(ta.getState().toString()).append("\",\"")
+
+        .append(nodeHttpAddr == null ? "N/A" :
+          "<a class='nodelink' href='" + HttpConfig.getSchemePrefix() + nodeHttpAddr + "'>"
+          + nodeHttpAddr + "</a>")
+        .append("\",\"")
+
+        .append(ta.getAssignedContainerId() == null ? "N/A" :
+          "<a class='logslink' href='" + url(HttpConfig.getSchemePrefix(), nodeHttpAddr, "node"
+            , "containerlogs", ta.getAssignedContainerIdStr(), app.getJob()
+            .getUserName()) + "'>logs</a>")
+          .append("\",\"")
+
+        .append(ta.getStartTime()).append("\",\"")
+        .append(ta.getFinishTime()).append("\",\"")
+        .append(ta.getElapsedTime()).append("\",\"")
+        .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
+          diag))).append("\"],\n");
+      }
+      //Remove the last comma and close off the array of arrays
+      if(attemptsTableData.charAt(attemptsTableData.length() - 2) == ',') {
+        attemptsTableData.delete(attemptsTableData.length()-2, attemptsTableData.length()-1);
       }
-      tbody._()._();
+      attemptsTableData.append("]");
+      html.script().$type("text/javascript").
+      _("var attemptsTableData=" + attemptsTableData)._();
     }
 
     protected boolean isValidRequest() {
@@ -140,9 +130,24 @@ public class TaskPage extends AppView {
   }
 
   private String attemptsTableInit() {
-    return tableInit().
-        // Sort by id upon page load
-        append(", aaSorting: [[0, 'asc']]").
-        append("}").toString();
+    return tableInit()
+    .append(", 'aaData': attemptsTableData")
+    .append(", bDeferRender: true")
+    .append(", bProcessing: true")
+    .append("\n,aoColumnDefs:[\n")
+
+    //logs column should not be filterable (it includes container ID which may pollute searches)
+    .append("\n{'aTargets': [ 4 ]")
+    .append(", 'bSearchable': false }")
+
+    .append("\n, {'sType':'numeric', 'aTargets': [ 5, 6")
+    .append(" ], 'mRender': renderHadoopDate }")
+
+    .append("\n, {'sType':'numeric', 'aTargets': [ 7")
+    .append(" ], 'mRender': renderHadoopElapsedTime }]")
+
+    // Sort by id upon page load
+    .append("\n, aaSorting: [[0, 'asc']]")
+    .append("}").toString();
   }
 }

+ 167 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java

@@ -50,11 +50,15 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
 import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.junit.Test;
 
@@ -734,12 +738,173 @@ public class TestRecovery {
     app.verifyCompleted();
     validateOutput();
   }
-  
+
+  /**
+   * AM with 2 maps and 1 reduce. A speculative attempt is launched for the
+   * 1st map and the original attempt succeeds. The AM crashes after the first
+   * task finishes, then recovers completely and the job succeeds in the
+   * second generation.
+   * 
+   * @throws Exception
+   */
+  @Test
+  public void testSpeculative() throws Exception {
+
+    int runCount = 0;
+    long am1StartTimeEst = System.currentTimeMillis();
+    MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), true, ++runCount);
+    Configuration conf = new Configuration();
+    conf.setBoolean("mapred.mapper.new-api", true);
+    conf.setBoolean("mapred.reducer.new-api", true);
+    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+    conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
+    Job job = app.submit(conf);
+    app.waitForState(job, JobState.RUNNING);
+    long jobStartTime = job.getReport().getStartTime();
+    //all maps would be running
+    Assert.assertEquals("No of tasks not correct",
+       3, job.getTasks().size());
+
+    Iterator<Task> it = job.getTasks().values().iterator();
+    Task mapTask1 = it.next();
+    Task mapTask2 = it.next();
+    Task reduceTask = it.next();
+
+    // all maps must be running
+    app.waitForState(mapTask1, TaskState.RUNNING);
+    app.waitForState(mapTask2, TaskState.RUNNING);
+
+    // Launch a Speculative Task for the first Task
+    app.getContext().getEventHandler().handle(
+        new TaskEvent(mapTask1.getID(), TaskEventType.T_ADD_SPEC_ATTEMPT));
+    int timeOut = 0;
+    while (mapTask1.getAttempts().size() != 2 && timeOut++ < 10) {
+      Thread.sleep(1000);
+      LOG.info("Waiting for next attempt to start");
+    }
+    Iterator<TaskAttempt> t1it = mapTask1.getAttempts().values().iterator();
+    TaskAttempt task1Attempt1 = t1it.next();
+    TaskAttempt task1Attempt2 = t1it.next();
+    TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
+
+    ContainerId t1a2contId = task1Attempt2.getAssignedContainerID();
+
+    LOG.info(t1a2contId.toString());
+    LOG.info(task1Attempt1.getID().toString());
+    LOG.info(task1Attempt2.getID().toString());
+
+    // Launch container for speculative attempt
+    app.getContext().getEventHandler().handle(
+        new TaskAttemptContainerLaunchedEvent(task1Attempt2.getID(), runCount));
+
+    //before sending the TA_DONE, event make sure attempt has come to 
+    //RUNNING state
+    app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
+    app.waitForState(task1Attempt2, TaskAttemptState.RUNNING);
+    app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
+
+    // the reduce task should already be running
+    Assert.assertEquals("Reduce Task state not correct",
+        TaskState.RUNNING, reduceTask.getReport().getTaskState());
+
+    //send the done signal to the map 1 attempt 1
+    app.getContext().getEventHandler().handle(
+        new TaskAttemptEvent(
+            task1Attempt1.getID(),
+            TaskAttemptEventType.TA_DONE));
+
+    app.waitForState(task1Attempt1, TaskAttemptState.SUCCEEDED);
+
+    //wait for first map task to complete
+    app.waitForState(mapTask1, TaskState.SUCCEEDED);
+    long task1StartTime = mapTask1.getReport().getStartTime();
+    long task1FinishTime = mapTask1.getReport().getFinishTime();
+
+    //stop the app
+    app.stop();
+
+    //rerun
+    //in rerun the 1st map will be recovered from previous run
+    long am2StartTimeEst = System.currentTimeMillis();
+    app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
+    conf = new Configuration();
+    conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
+    conf.setBoolean("mapred.mapper.new-api", true);
+    conf.setBoolean("mapred.reducer.new-api", true);
+    conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
+    conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+    job = app.submit(conf);
+    app.waitForState(job, JobState.RUNNING);
+    //all maps would be running
+    Assert.assertEquals("No of tasks not correct",
+       3, job.getTasks().size());
+    it = job.getTasks().values().iterator();
+    mapTask1 = it.next();
+    mapTask2 = it.next();
+    reduceTask = it.next();
+
+    // first map will be recovered, no need to send done
+    app.waitForState(mapTask1, TaskState.SUCCEEDED);
+
+    app.waitForState(mapTask2, TaskState.RUNNING);
+
+    task2Attempt = mapTask2.getAttempts().values().iterator().next();
+    //before sending the TA_DONE, event make sure attempt has come to 
+    //RUNNING state
+    app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
+
+    //send the done signal to the 2nd map task
+    app.getContext().getEventHandler().handle(
+        new TaskAttemptEvent(
+            mapTask2.getAttempts().values().iterator().next().getID(),
+            TaskAttemptEventType.TA_DONE));
+
+    //wait to get it completed
+    app.waitForState(mapTask2, TaskState.SUCCEEDED);
+
+    //wait for reduce to be running before sending done
+    app.waitForState(reduceTask, TaskState.RUNNING);
+
+    //send the done signal to the reduce
+    app.getContext().getEventHandler().handle(
+        new TaskAttemptEvent(
+            reduceTask.getAttempts().values().iterator().next().getID(),
+            TaskAttemptEventType.TA_DONE));
+
+    app.waitForState(job, JobState.SUCCEEDED);
+    app.verifyCompleted();
+    Assert.assertEquals("Job Start time not correct",
+        jobStartTime, job.getReport().getStartTime());
+    Assert.assertEquals("Task Start time not correct",
+        task1StartTime, mapTask1.getReport().getStartTime());
+    Assert.assertEquals("Task Finish time not correct",
+        task1FinishTime, mapTask1.getReport().getFinishTime());
+    Assert.assertEquals(2, job.getAMInfos().size());
+    int attemptNum = 1;
+    // Verify AMInfo
+    for (AMInfo amInfo : job.getAMInfos()) {
+      Assert.assertEquals(attemptNum++, amInfo.getAppAttemptId()
+          .getAttemptId());
+      Assert.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId()
+          .getApplicationAttemptId());
+      Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
+      Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
+      Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
+    }
+    long am1StartTimeReal = job.getAMInfos().get(0).getStartTime();
+    long am2StartTimeReal = job.getAMInfos().get(1).getStartTime();
+    Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst
+        && am1StartTimeReal <= am2StartTimeEst);
+    Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst
+        && am2StartTimeReal <= System.currentTimeMillis());
+
+  }
+
   private void writeBadOutput(TaskAttempt attempt, Configuration conf)
   throws Exception {
   TaskAttemptContext tContext = new TaskAttemptContextImpl(conf, 
       TypeConverter.fromYarn(attempt.getID()));
-  
+ 
   TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
   RecordWriter theRecordWriter = theOutputFormat
       .getRecordWriter(tContext);

+ 58 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java

@@ -35,6 +35,9 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Task;
 import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
+import org.apache.hadoop.mapreduce.Counter;
+import org.apache.hadoop.mapreduce.Counters;
+import org.apache.hadoop.mapreduce.TaskCounter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
@@ -52,7 +55,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.TaskStateInternal;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
-import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl;
 import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.token.Token;
@@ -143,6 +145,7 @@ public class TestTaskImpl {
     private float progress = 0;
     private TaskAttemptState state = TaskAttemptState.NEW;
     private TaskType taskType;
+    private Counters attemptCounters = TaskAttemptImpl.EMPTY_COUNTERS;
 
 
     public MockTaskAttemptImpl(TaskId taskId, int id, EventHandler eventHandler,
         TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
@@ -178,7 +181,15 @@ public class TestTaskImpl {
     public TaskAttemptState getState() {
       return state;
     }
-    
+
+    @Override
+    public Counters getCounters() {
+      return attemptCounters;
+    }
+
+    public void setCounters(Counters counters) {
+      attemptCounters = counters;
+    }
   }
   
   private class MockTask extends Task {
@@ -687,4 +698,49 @@ public class TestTaskImpl {
         TaskEventType.T_ATTEMPT_KILLED));
     assertEquals(TaskState.FAILED, mockTask.getState());
   }
+
+  @Test
+  public void testCountersWithSpeculation() {
+    mockTask = new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
+        remoteJobConfFile, conf, taskAttemptListener, jobToken,
+        credentials, clock,
+        completedTasksFromPreviousRun, startCount,
+        metrics, appContext, TaskType.MAP) {
+          @Override
+          protected int getMaxAttempts() {
+            return 1;
+          }
+    };
+    TaskId taskId = getNewTaskID();
+    scheduleTaskAttempt(taskId);
+    launchTaskAttempt(getLastAttempt().getAttemptId());
+    updateLastAttemptState(TaskAttemptState.RUNNING);
+    MockTaskAttemptImpl baseAttempt = getLastAttempt();
+
+    // add a speculative attempt
+    mockTask.handle(new TaskTAttemptEvent(getLastAttempt().getAttemptId(),
+        TaskEventType.T_ADD_SPEC_ATTEMPT));
+    launchTaskAttempt(getLastAttempt().getAttemptId());
+    updateLastAttemptState(TaskAttemptState.RUNNING);
+    MockTaskAttemptImpl specAttempt = getLastAttempt();
+    assertEquals(2, taskAttempts.size());
+
+    Counters specAttemptCounters = new Counters();
+    Counter cpuCounter = specAttemptCounters.findCounter(
+        TaskCounter.CPU_MILLISECONDS);
+    cpuCounter.setValue(1000);
+    specAttempt.setCounters(specAttemptCounters);
+
+    // have the spec attempt succeed but second attempt at 1.0 progress as well
+    commitTaskAttempt(specAttempt.getAttemptId());
+    specAttempt.setProgress(1.0f);
+    specAttempt.setState(TaskAttemptState.SUCCEEDED);
+    mockTask.handle(new TaskTAttemptEvent(specAttempt.getAttemptId(),
+        TaskEventType.T_ATTEMPT_SUCCEEDED));
+    assertEquals(TaskState.SUCCEEDED, mockTask.getState());
+    baseAttempt.setProgress(1.0f);
+
+    Counters taskCounters = mockTask.getCounters();
+    assertEquals("wrong counters for task", specAttemptCounters, taskCounters);
+  }
 }

+ 18 - 51
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml

@@ -54,63 +54,30 @@
   <build>
     <plugins>
       <plugin>
-        <artifactId>maven-antrun-plugin</artifactId>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
         <executions>
           <execution>
-            <id>create-protobuf-generated-sources-directory</id>
-            <phase>initialize</phase>
-            <configuration>
-              <target>
-                <mkdir dir="target/generated-sources/proto" />
-              </target>
-            </configuration>
-            <goals>
-              <goal>run</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>generate-sources</id>
-            <phase>generate-sources</phase>
-            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-I../../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
-                <argument>-I../../../hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/</argument>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>--java_out=target/generated-sources/proto</argument>
-                <argument>src/main/proto/mr_protos.proto</argument>
-                <argument>src/main/proto/mr_service_protos.proto</argument>
-                <argument>src/main/proto/MRClientProtocol.proto</argument>
-              </arguments>
-            </configuration>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-source</id>
+            <id>compile-protoc</id>
             <phase>generate-sources</phase>
             <phase>generate-sources</phase>
             <goals>
+              <goal>protoc</goal>
             </goals>
             </goals>
             <configuration>
-                <source>target/generated-sources/proto</source>
-              </sources>
+              <imports>
+                <param>${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto</param>
+                <param>${basedir}/../../../hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto</param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>mr_protos.proto</include>
+                  <include>mr_service_protos.proto</include>
+                  <include>MRClientProtocol.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-sources/java</output>
             </configuration>
           </execution>
         </executions>

+ 3 - 9
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java

@@ -26,7 +26,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
 import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 
 
 @InterfaceAudience.Private
 public class LocalClientProtocolProvider extends ClientProtocolProvider {
@@ -38,16 +37,11 @@ public class LocalClientProtocolProvider extends ClientProtocolProvider {
     if (!MRConfig.LOCAL_FRAMEWORK_NAME.equals(framework)) {
       return null;
     }
-    String tracker = conf.get(JTConfig.JT_IPC_ADDRESS, "local");
-    if ("local".equals(tracker)) {
+    if (conf.get("mapreduce.job.maps") == null) {
       conf.setInt("mapreduce.job.maps", 1);
-      return new LocalJobRunner(conf);
-    } else {
-
-      throw new IOException("Invalid \"" + JTConfig.JT_IPC_ADDRESS
-          + "\" configuration value for LocalJobRunner : \""
-          + tracker + "\"");
     }
+
+    return new LocalJobRunner(conf);
   }
 
   @Override
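With this change the local runner is chosen purely by framework name; the old JobTracker address key is no longer consulted, and mapreduce.job.maps is only defaulted to 1 when the job has not set it. A minimal usage sketch, mirroring the updated TestClientProtocolProviderImpls further down in this change set:

    // Sketch: selecting the LocalJobRunner by framework name alone.
    Configuration conf = new Configuration();
    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);   // i.e. "local"
    Cluster cluster = new Cluster(conf);
    // cluster.getClient() is a LocalJobRunner; no JobTracker address is involved.
    cluster.close();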

+ 0 - 18
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml

@@ -67,24 +67,6 @@
           </execution>
         </executions>
       </plugin>
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-source</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>target/generated-sources/avro</source>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-antrun-plugin</artifactId>

+ 8 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java

@@ -246,6 +246,7 @@ public class JobHistoryParser implements HistoryEventHandler {
     attemptInfo.state = StringInterner.weakIntern(event.getState());
     attemptInfo.counters = event.getCounters();
     attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
+    info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
   }
 
   private void handleReduceAttemptFinishedEvent
@@ -262,6 +263,7 @@ public class JobHistoryParser implements HistoryEventHandler {
     attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
     attemptInfo.port = event.getPort();
     attemptInfo.rackname = StringInterner.weakIntern(event.getRackName());
+    info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
   }
 
   private void handleMapAttemptFinishedEvent(MapAttemptFinishedEvent event) {
@@ -276,6 +278,7 @@ public class JobHistoryParser implements HistoryEventHandler {
     attemptInfo.hostname = StringInterner.weakIntern(event.getHostname());
     attemptInfo.port = event.getPort();
     attemptInfo.rackname = StringInterner.weakIntern(event.getRackName());
+    info.completedTaskAttemptsMap.put(event.getAttemptId(), attemptInfo);
   }
 
   private void handleTaskAttemptFailedEvent(
@@ -306,6 +309,7 @@ public class JobHistoryParser implements HistoryEventHandler {
         taskInfo.successfulAttemptId = null;
       }
     }
+    info.completedTaskAttemptsMap.put(event.getTaskAttemptId(), attemptInfo);
   }
 
   private void handleTaskAttemptStartedEvent(TaskAttemptStartedEvent event) {
@@ -443,6 +447,7 @@ public class JobHistoryParser implements HistoryEventHandler {
     Map<JobACL, AccessControlList> jobACLs;
     
     Map<TaskID, TaskInfo> tasksMap;
+    Map<TaskAttemptID, TaskAttemptInfo> completedTaskAttemptsMap;
     List<AMInfo> amInfos;
     AMInfo latestAmInfo;
     boolean uberized;
@@ -456,6 +461,7 @@ public class JobHistoryParser implements HistoryEventHandler {
       finishedMaps = finishedReduces = 0;
       username = jobname = jobConfPath = jobQueueName = "";
       tasksMap = new HashMap<TaskID, TaskInfo>();
+      completedTaskAttemptsMap = new HashMap<TaskAttemptID, TaskAttemptInfo>();
       jobACLs = new HashMap<JobACL, AccessControlList>();
       priority = JobPriority.NORMAL;
     }
@@ -530,6 +536,8 @@ public class JobHistoryParser implements HistoryEventHandler {
     public Counters getReduceCounters() { return reduceCounters; }
     /** @return the map of all tasks in this job */
     public Map<TaskID, TaskInfo> getAllTasks() { return tasksMap; }
+    /** @return the map of all completed task attempts in this job */
+    public Map<TaskAttemptID, TaskAttemptInfo> getAllCompletedTaskAttempts() { return completedTaskAttemptsMap; }
     /** @return the priority of this job */
     public String getPriority() { return priority.toString(); }
     public Map<JobACL, AccessControlList> getJobACLs() { return jobACLs; }

+ 61 - 65
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTaskPage.java

@@ -29,6 +29,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
 
 
 import java.util.Collection;
 
+import org.apache.commons.lang.StringEscapeUtils;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
@@ -110,13 +111,17 @@ public class HsTaskPage extends HsView {
               th(".note", "Note");
       
        TBODY<TABLE<Hamlet>> tbody = headRow._()._().tbody();
-      for (TaskAttempt ta : getTaskAttempts()) {
+       // Write all the data into a JavaScript array of arrays for JQuery
+       // DataTables to display
+       StringBuilder attemptsTableData = new StringBuilder("[\n");
+
+       for (TaskAttempt ta : getTaskAttempts()) {
         String taid = MRApps.toString(ta.getID());
 
         String nodeHttpAddr = ta.getNodeHttpAddress();
         String containerIdString = ta.getAssignedContainerID().toString();
         String nodeIdString = ta.getAssignedContainerMgrAddress();
-        String nodeRackName = ta.getNodeRackName();        
+        String nodeRackName = ta.getNodeRackName();
 
 
         long attemptStartTime = ta.getLaunchTime();
         long shuffleFinishTime = -1;
@@ -138,58 +143,43 @@ public class HsTaskPage extends HsView {
         long attemptElapsed =
             Times.elapsed(attemptStartTime, attemptFinishTime, false);
         int sortId = ta.getID().getId() + (ta.getID().getTaskId().getId() * 10000);
-        
-        TR<TBODY<TABLE<Hamlet>>> row = tbody.tr();
-        TD<TR<TBODY<TABLE<Hamlet>>>> td = row.td();
 
 
-        td.br().$title(String.valueOf(sortId))._(). // sorting
-            _(taid)._().td(ta.getState().toString()).td().a(".nodelink",
-                HttpConfig.getSchemePrefix()+ nodeHttpAddr,
-                nodeRackName + "/" + nodeHttpAddr);
-        td._();
-        row.td().
-          a(".logslink",
-            url("logs", nodeIdString, containerIdString, taid, app.getJob()
-                .getUserName()), "logs")._();
-        
-        row.td().
-          br().$title(String.valueOf(attemptStartTime))._().
-            _(Times.format(attemptStartTime))._();
+        attemptsTableData.append("[\"")
+        .append(sortId + " ").append(taid).append("\",\"")
+        .append(ta.getState().toString()).append("\",\"")
+
+        .append("<a class='nodelink' href='" + HttpConfig.getSchemePrefix() + nodeHttpAddr + "'>")
+        .append(nodeRackName + "/" + nodeHttpAddr + "</a>\",\"")
+
+        .append("<a class='logslink' href='").append(url("logs", nodeIdString
+          , containerIdString, taid, app.getJob().getUserName()))
+          .append("'>logs</a>\",\"")
+
+          .append(attemptStartTime).append("\",\"");
 
 
         if(type == TaskType.REDUCE) {
         if(type == TaskType.REDUCE) {
-          row.td().
-            br().$title(String.valueOf(shuffleFinishTime))._().
-            _(Times.format(shuffleFinishTime))._();
-          row.td().
-          br().$title(String.valueOf(sortFinishTime))._().
-          _(Times.format(sortFinishTime))._();
+          attemptsTableData.append(shuffleFinishTime).append("\",\"")
+          .append(sortFinishTime).append("\",\"");
         }
         }
-        row.
-            td().
-              br().$title(String.valueOf(attemptFinishTime))._().
-              _(Times.format(attemptFinishTime))._();
-        
+        attemptsTableData.append(attemptFinishTime).append("\",\"");
+
         if(type == TaskType.REDUCE) {
         if(type == TaskType.REDUCE) {
-          row.td().
-            br().$title(String.valueOf(elapsedShuffleTime))._().
-          _(formatTime(elapsedShuffleTime))._();
-          row.td().
-          br().$title(String.valueOf(elapsedSortTime))._().
-        _(formatTime(elapsedSortTime))._();
-          row.td().
-            br().$title(String.valueOf(elapsedReduceTime))._().
-          _(formatTime(elapsedReduceTime))._();
+          attemptsTableData.append(elapsedShuffleTime).append("\",\"")
+          .append(elapsedSortTime).append("\",\"")
+          .append(elapsedReduceTime).append("\",\"");
         }
         }
-        
-        row.
-          td().
-            br().$title(String.valueOf(attemptElapsed))._().
-          _(formatTime(attemptElapsed))._().
-          td(".note", Joiner.on('\n').join(ta.getDiagnostics()));
-        row._();
+          attemptsTableData.append(attemptElapsed).append("\",\"")
+          .append(StringEscapeUtils.escapeJavaScript(StringEscapeUtils.escapeHtml(
+           Joiner.on('\n').join(ta.getDiagnostics())))).append("\"],\n");
       }
       }
-      
-      
+       //Remove the last comma and close off the array of arrays
+       if(attemptsTableData.charAt(attemptsTableData.length() - 2) == ',') {
+         attemptsTableData.delete(attemptsTableData.length()-2, attemptsTableData.length()-1);
+       }
+       attemptsTableData.append("]");
+       html.script().$type("text/javascript").
+       _("var attemptsTableData=" + attemptsTableData)._();
+
       TR<TFOOT<TABLE<Hamlet>>> footRow = tbody._().tfoot().tr();
       TR<TFOOT<TABLE<Hamlet>>> footRow = tbody._().tfoot().tr();
       footRow.
       footRow.
           th().input("search_init").$type(InputType.text).
           th().input("search_init").$type(InputType.text).
@@ -237,10 +227,6 @@ public class HsTaskPage extends HsView {
       footRow._()._()._();
       footRow._()._()._();
     }
     }
 
 
-    private String formatTime(long elapsed) {
-      return elapsed < 0 ? "N/A" : StringUtils.formatTime(elapsed);
-    }
-    
     /**
     /**
      * @return true if this is a valid request else false.
      * @return true if this is a valid request else false.
      */
      */
@@ -292,24 +278,34 @@ public class HsTaskPage extends HsView {
       TaskId taskID = MRApps.toTaskID($(TASK_ID));
       TaskId taskID = MRApps.toTaskID($(TASK_ID));
       type = taskID.getTaskType();
       type = taskID.getTaskType();
     }
     }
-    StringBuilder b = tableInit().
-      append(",aoColumnDefs:[");
+    StringBuilder b = tableInit()
+      .append(", 'aaData': attemptsTableData")
+      .append(", bDeferRender: true")
+      .append(", bProcessing: true")
+      .append("\n,aoColumnDefs:[\n")
 
 
-    b.append("{'sType':'title-numeric', 'aTargets': [ 0");
-    if(type == TaskType.REDUCE) {
-      b.append(", 7, 8, 9, 10");
-    } else { //MAP
-      b.append(", 5");
-    }
-    b.append(" ] }]");
+      //logs column should not filterable (it includes container ID which may pollute searches)
+      .append("\n{'aTargets': [ 3 ]")
+      .append(", 'bSearchable': false }")
 
 
-    // Sort by id upon page load
-    b.append(", aaSorting: [[0, 'asc']]");
+      .append("\n, {'sType':'numeric', 'aTargets': [ 0 ]")
+      .append(", 'mRender': parseHadoopAttemptID }")
 
 
-    b.append("}");
-    return b.toString();
+      .append("\n, {'sType':'numeric', 'aTargets': [ 4, 5")
+      //Column numbers are different for maps and reduces
+      .append(type == TaskType.REDUCE ? ", 6, 7" : "")
+      .append(" ], 'mRender': renderHadoopDate }")
+
+      .append("\n, {'sType':'numeric', 'aTargets': [")
+      .append(type == TaskType.REDUCE ? "8, 9, 10, 11" : "6")
+      .append(" ], 'mRender': renderHadoopElapsedTime }]")
+
+      // Sort by id upon page load
+      .append("\n, aaSorting: [[0, 'asc']]")
+      .append("}");
+      return b.toString();
   }
-  
+
   private String attemptsPostTableInit() {
     return "var asInitVals = new Array();\n" +
            "$('tfoot input').keyup( function () \n{"+

+ 1 - 5
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java

@@ -140,6 +140,7 @@ public class HsTasksBlock extends HtmlBlock {
         attemptFinishTime = ta.getFinishTime();
         attemptElapsed = ta.getElapsedTime();
       }
+
       tasksTableData.append("[\"")
       .append("<a href='" + url("task", tid)).append("'>")
       .append(tid).append("</a>\",\"")
@@ -205,9 +206,4 @@ public class HsTasksBlock extends HtmlBlock {
 
 
     footRow._()._()._();
   }
-
-  private String formatTime(long elapsed) {
-    return elapsed < 0 ? "N/A" : StringUtils.formatTime(elapsed);
-  }
-
 }

+ 14 - 22
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksPage.java

@@ -67,33 +67,25 @@ public class HsTasksPage extends HsView {
       type = MRApps.taskType(symbol);
     }
     StringBuilder b = tableInit().
-    append(", 'aaData': tasksTableData");
-    b.append(", bDeferRender: true");
-    b.append(", bProcessing: true");
+    append(", 'aaData': tasksTableData")
+    .append(", bDeferRender: true")
+    .append(", bProcessing: true")
 
 
-    b.append("\n, aoColumnDefs: [\n");
-    b.append("{'sType':'numeric', 'aTargets': [ 0 ]");
-    b.append(", 'mRender': parseHadoopID }");
+    .append("\n, aoColumnDefs: [\n")
+    .append("{'sType':'numeric', 'aTargets': [ 0 ]")
+    .append(", 'mRender': parseHadoopID }")
 
 
-    b.append(", {'sType':'numeric', 'aTargets': [ 4");
-    if(type == TaskType.REDUCE) {
-      b.append(", 9, 10, 11, 12");
-    } else { //MAP
-      b.append(", 7");
-    }
-    b.append(" ], 'mRender': renderHadoopElapsedTime }");
+    .append(", {'sType':'numeric', 'aTargets': [ 4")
+    .append(type == TaskType.REDUCE ? ", 9, 10, 11, 12" : ", 7")
+    .append(" ], 'mRender': renderHadoopElapsedTime }")
 
 
-    b.append("\n, {'sType':'numeric', 'aTargets': [ 2, 3, 5");
-    if(type == TaskType.REDUCE) {
-      b.append(", 6, 7, 8");
-    } else { //MAP
-      b.append(", 6");
-    }
-    b.append(" ], 'mRender': renderHadoopDate }]");
+    .append("\n, {'sType':'numeric', 'aTargets': [ 2, 3, 5")
+    .append(type == TaskType.REDUCE ? ", 6, 7, 8" : ", 6")
+    .append(" ], 'mRender': renderHadoopDate }]")
 
 
     // Sort by id upon page load
     // Sort by id upon page load
-    b.append("\n, aaSorting: [[0, 'asc']]");
-    b.append("}");
+    .append("\n, aaSorting: [[0, 'asc']]")
+    .append("}");
     return b.toString();
     return b.toString();
   }
   }
   
   

+ 1 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java

@@ -98,6 +98,7 @@ public class TestLineRecordReader extends TestCase {
       InterruptedException, ClassNotFoundException {
       InterruptedException, ClassNotFoundException {
     Configuration conf = new Configuration();
     Configuration conf = new Configuration();
     conf.set("textinputformat.record.delimiter", "\t\n");
     conf.set("textinputformat.record.delimiter", "\t\n");
+    conf.setInt("mapreduce.job.maps", 1);
     FileSystem localFs = FileSystem.getLocal(conf);
     FileSystem localFs = FileSystem.getLocal(conf);
     // cleanup
     // cleanup
     localFs.delete(workDir, true);
     localFs.delete(workDir, true);

+ 1 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/lib/TestChainMapReduce.java

@@ -82,6 +82,7 @@ public class TestChainMapReduce extends HadoopTestCase {
 
 
     JobConf conf = createJobConf();
     JobConf conf = createJobConf();
     conf.setBoolean("localFS", isLocalFS());
     conf.setBoolean("localFS", isLocalFS());
+    conf.setInt("mapreduce.job.maps", 1);
 
 
     cleanFlags(conf);
     cleanFlags(conf);
 
 

+ 4 - 18
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java

@@ -42,24 +42,10 @@ public class TestClientProtocolProviderImpls extends TestCase {
 
 
     }
     }
 
 
-    try {
-      conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
-      conf.set(JTConfig.JT_IPC_ADDRESS, "127.0.0.1:0");
-
-      new Cluster(conf);
-      fail("Cluster with Local Framework name should use local JT address");
-    } catch (IOException e) {
-
-    }
-
-    try {
-      conf.set(JTConfig.JT_IPC_ADDRESS, "local");
-      Cluster cluster = new Cluster(conf);
-      assertTrue(cluster.getClient() instanceof LocalJobRunner);
-      cluster.close();
-    } catch (IOException e) {
-
-    }
+    conf.set(MRConfig.FRAMEWORK_NAME, "local");
+    Cluster cluster = new Cluster(conf);
+    assertTrue(cluster.getClient() instanceof LocalJobRunner);
+    cluster.close();
   }
   }
 
 
   @Test
   @Test

+ 86 - 0
hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java

@@ -0,0 +1,86 @@
+/*
+ * Copyright 2012 The Apache Software Foundation.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.maven.plugin.protoc;
+
+import org.apache.hadoop.maven.plugin.util.Exec;
+import org.apache.hadoop.maven.plugin.util.FileSetUtils;
+import org.apache.maven.model.FileSet;
+import org.apache.maven.plugin.AbstractMojo;
+import org.apache.maven.plugin.MojoExecutionException;
+import org.apache.maven.plugins.annotations.LifecyclePhase;
+import org.apache.maven.plugins.annotations.Mojo;
+import org.apache.maven.plugins.annotations.Parameter;
+import org.apache.maven.project.MavenProject;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.List;
+
+
+@Mojo(name="protoc", defaultPhase = LifecyclePhase.GENERATE_SOURCES)
+public class ProtocMojo extends AbstractMojo {
+
+  @Parameter(defaultValue="${project}")
+  private MavenProject project;
+
+  @Parameter
+  private List<File> imports;
+
+  @Parameter(defaultValue="${project.build.directory}/generated-sources/java")
+  private File output;
+
+  @Parameter(required=true)
+  private FileSet source;
+
+  @Parameter(defaultValue="protoc")
+  private String protocCommand;
+
+
+  public void execute() throws MojoExecutionException {
+    try {
+      if (!output.mkdirs()) {
+        if (!output.exists()) {
+          throw new MojoExecutionException("Could not create directory: " + 
+            output);
+        }
+      }
+      List<String> command = new ArrayList<String>();
+      command.add(protocCommand);
+      command.add("--java_out=" + output.getCanonicalPath());
+      if (imports != null) {
+        for (File i : imports) {
+          command.add("-I" + i.getCanonicalPath());
+        }
+      }
+      for (File f : FileSetUtils.convertFileSetToFiles(source)) {
+        command.add(f.getCanonicalPath());
+      }
+      Exec exec = new Exec(this);
+      List<String> out = new ArrayList<String>();
+      if (exec.run(command, out) != 0) {
+        getLog().error("protoc compiler error");
+        for (String s : out) {
+          getLog().error(s);
+        }
+        throw new MojoExecutionException("protoc failure");
+      }
+    } catch (Throwable ex) {
+      throw new MojoExecutionException(ex.toString(), ex);
+    }
+    project.addCompileSourceRoot(output.getAbsolutePath());
+  }
+
+}

+ 13 - 0
hadoop-project/src/site/site.xml

@@ -52,13 +52,25 @@
       <item name="Cluster Setup" href="hadoop-project-dist/hadoop-common/ClusterSetup.html"/>
       <item name="CLI Mini Cluster" href="hadoop-project-dist/hadoop-common/CLIMiniCluster.html"/>
       <item name="File System Shell" href="hadoop-project-dist/hadoop-common/FileSystemShell.html"/>
+      <item name="Native Libraries" href="hadoop-project-dist/hadoop-common/NativeLibraries.html"/>
+      <item name="Superusers" href="hadoop-project-dist/hadoop-common/Superusers.html"/>
       <item name="Hadoop Commands Reference" href="hadoop-project-dist/hadoop-common/CommandsManual.html"/>
+      <item name="Service Level Authorization" href="hadoop-project-dist/hadoop-common/ServiceLevelAuth.html"/>
+      <item name="HTTP Authentication" href="hadoop-project-dist/hadoop-common/HttpAuthentication.html"/>
    </menu>
    
    <menu name="HDFS" inherit="top">
+      <item name="HDFS User Guide" href="hadoop-project-dist/hadoop-hdfs/HdfsUserGuide.html"/>
      <item name="High Availability With QJM" href="hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html"/>
      <item name="High Availability With NFS" href="hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithNFS.html"/>
      <item name="Federation" href="hadoop-project-dist/hadoop-hdfs/Federation.html"/>
+      <item name="HDFS Architecture" href="hadoop-project-dist/hadoop-hdfs/HdfsDesign.html"/>
+      <item name="Edits Viewer" href="hadoop-project-dist/hadoop-hdfs/HdfsEditsViewer.html"/>
+      <item name="Image Viewer" href="hadoop-project-dist/hadoop-hdfs/HdfsImageViewer.html"/>
+      <item name="Permissions and HDFS" href="hadoop-project-dist/hadoop-hdfs/HdfsPermissionsGuide.html"/>
+      <item name="Quotas and HDFS" href="hadoop-project-dist/hadoop-hdfs/HdfsQuotaAdminGuide.html"/>
+      <item name="HFTP" href="hadoop-project-dist/hadoop-hdfs/Hftp.html"/>
+      <item name="C API libhdfs" href="hadoop-project-dist/hadoop-hdfs/LibHdfs.html"/>
      <item name="WebHDFS REST API" href="hadoop-project-dist/hadoop-hdfs/WebHDFS.html"/>
      <item name="HttpFS Gateway" href="hadoop-hdfs-httpfs/index.html"/>
    </menu>
@@ -72,6 +84,7 @@
      <item name="YARN Architecture" href="hadoop-yarn/hadoop-yarn-site/YARN.html"/>
      <item name="Writing YARN Applications" href="hadoop-yarn/hadoop-yarn-site/WritingYarnApplications.html"/>
      <item name="Capacity Scheduler" href="hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html"/>
+      <item name="Fair Scheduler" href="hadoop-yarn/hadoop-yarn-site/FairScheduler.html"/>
      <item name="Web Application Proxy" href="hadoop-yarn/hadoop-yarn-site/WebApplicationProxy.html"/>
      <item name="YARN Commands" href="hadoop-yarn/hadoop-yarn-site/YarnCommands.html"/>
    </menu>

+ 0 - 279
hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/typedbytes/TestIO.java

@@ -1,279 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.typedbytes;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.hadoop.io.ArrayWritable;
-import org.apache.hadoop.io.BooleanWritable;
-import org.apache.hadoop.io.ByteWritable;
-import org.apache.hadoop.io.BytesWritable;
-import org.apache.hadoop.io.DoubleWritable;
-import org.apache.hadoop.io.FloatWritable;
-import org.apache.hadoop.io.MapWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.VIntWritable;
-import org.apache.hadoop.io.VLongWritable;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.record.Buffer;
-import org.apache.hadoop.record.RecRecord0;
-import org.apache.hadoop.record.RecRecord1;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-public class TestIO {
-
-  private File tmpfile;
-  private File tmpdir;
-
-  @Before
-  public void setUp() throws Exception {
-    this.tmpdir = new File(System.getProperty("test.build.data", "/tmp"));
-    if(this.tmpdir.exists() || this.tmpdir.mkdirs()) {
-      this.tmpfile = new File(this.tmpdir, 
-        "typedbytes.bin");
-    } else {
-      throw new IOException("Failed to create directory " + tmpdir.getAbsolutePath());	
-    }
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    tmpfile.delete();
-  }
-
-  @Test
-  public void testIO() throws IOException {
-    ArrayList<Object> vector = new ArrayList<Object>();
-    vector.add("test");
-    vector.add(false);
-    vector.add(12345);
-    List<Object> list = new LinkedList<Object>();
-    list.add("another test");
-    list.add(true);
-    list.add(123456789L);
-    Map<Object, Object> map = new HashMap<Object, Object>();
-    map.put("one", 1);
-    map.put("vector", vector);
-    Buffer buffer = new Buffer(new byte[] { 1, 2, 3, 4 });
-    buffer.setCapacity(10);
-    Object[] objects = new Object[] {
-      buffer,
-      (byte) 123, true, 12345, 123456789L, (float) 1.2, 1.234,
-      "random string", vector, list, map 
-    };
-
-    FileOutputStream ostream = new FileOutputStream(tmpfile);
-    DataOutputStream dostream = new DataOutputStream(ostream);
-    TypedBytesOutput out = new TypedBytesOutput(dostream);
-    for (Object obj : objects) {
-      out.write(obj);
-    }
-    dostream.close();
-    ostream.close();
-
-    FileInputStream istream = new FileInputStream(tmpfile);
-    DataInputStream distream = new DataInputStream(istream);
-    TypedBytesInput in = new TypedBytesInput(distream);
-    for (Object obj : objects) {
-      assertEquals(obj, in.read());
-    }
-    distream.close();
-    istream.close();
-
-    istream = new FileInputStream(tmpfile);
-    distream = new DataInputStream(istream);
-    in = new TypedBytesInput(distream);
-    for (Object obj : objects) {
-      byte[] bytes = in.readRaw();
-      ByteArrayInputStream bais = new ByteArrayInputStream(bytes);
-      DataInputStream dis = new DataInputStream(bais);
-      assertEquals(obj, (new TypedBytesInput(dis)).read());
-      ByteArrayOutputStream baos = new ByteArrayOutputStream();
-      TypedBytesOutput tbout = new TypedBytesOutput(new DataOutputStream(baos));
-      tbout.writeRaw(bytes);
-      bais = new ByteArrayInputStream(bytes);
-      dis = new DataInputStream(bais);
-      assertEquals(obj, (new TypedBytesInput(dis)).read());
-    }
-    distream.close();
-    istream.close();
-  }
-
-  @Test
-  public void testCustomTypesIO() throws IOException {
-    byte[] rawBytes = new byte[] { 100, 0, 0, 0, 3, 1, 2, 3 };
-    
-    FileOutputStream ostream = new FileOutputStream(tmpfile);
-    DataOutputStream dostream = new DataOutputStream(ostream);
-    TypedBytesOutput out = new TypedBytesOutput(dostream);
-    out.writeRaw(rawBytes);
-    dostream.close();
-    ostream.close();
-
-    FileInputStream istream = new FileInputStream(tmpfile);
-    DataInputStream distream = new DataInputStream(istream);
-    TypedBytesInput in = new TypedBytesInput(distream);
-    assertTrue(Arrays.equals(rawBytes, in.readRaw()));
-    distream.close();
-    istream.close();
-  }
-  
-  @Test
-  public void testRecordIO() throws IOException {
-    RecRecord1 r1 = new RecRecord1();
-    r1.setBoolVal(true);
-    r1.setByteVal((byte) 0x66);
-    r1.setFloatVal(3.145F);
-    r1.setDoubleVal(1.5234);
-    r1.setIntVal(-4567);
-    r1.setLongVal(-2367L);
-    r1.setStringVal("random text");
-    r1.setBufferVal(new Buffer());
-    r1.setVectorVal(new ArrayList<String>());
-    r1.setMapVal(new TreeMap<String, String>());
-    RecRecord0 r0 = new RecRecord0();
-    r0.setStringVal("other random text");
-    r1.setRecordVal(r0);
-
-    FileOutputStream ostream = new FileOutputStream(tmpfile);
-    DataOutputStream dostream = new DataOutputStream(ostream);
-    TypedBytesRecordOutput out = TypedBytesRecordOutput.get(dostream);
-    r1.serialize(out, "");
-    dostream.close();
-    ostream.close();
-
-    FileInputStream istream = new FileInputStream(tmpfile);
-    DataInputStream distream = new DataInputStream(istream);
-    TypedBytesRecordInput in = TypedBytesRecordInput.get(distream);
-    RecRecord1 r2 = new RecRecord1();
-    r2.deserialize(in, "");
-    distream.close();
-    istream.close();
-    assertEquals(r1, r2);
-  }
-
-  @Test
-  public void testWritableIO() throws IOException {
-    Writable[] vectorValues = new Writable[] {
-      new Text("test1"), new Text("test2"), new Text("test3")
-    };
-    ArrayWritable vector = new ArrayWritable(Text.class, vectorValues);
-    MapWritable map = new MapWritable();
-    map.put(new Text("one"), new VIntWritable(1));
-    map.put(new Text("two"), new VLongWritable(2));
-    Writable[] writables = new Writable[] {
-      new BytesWritable(new byte[] { 1, 2, 3, 4 }),
-      new ByteWritable((byte) 123), new BooleanWritable(true),
-      new VIntWritable(12345), new VLongWritable(123456789L),
-      new FloatWritable((float) 1.2), new DoubleWritable(1.234),
-      new Text("random string")
-    };
-    TypedBytesWritable tbw = new TypedBytesWritable();
-    tbw.setValue("typed bytes text");
-    RecRecord1 r1 = new RecRecord1();
-    r1.setBoolVal(true);
-    r1.setByteVal((byte) 0x66);
-    r1.setFloatVal(3.145F);
-    r1.setDoubleVal(1.5234);
-    r1.setIntVal(-4567);
-    r1.setLongVal(-2367L);
-    r1.setStringVal("random text");
-    r1.setBufferVal(new Buffer());
-    r1.setVectorVal(new ArrayList<String>());
-    r1.setMapVal(new TreeMap<String, String>());
-    RecRecord0 r0 = new RecRecord0();
-    r0.setStringVal("other random text");
-    r1.setRecordVal(r0);
-
-    FileOutputStream ostream = new FileOutputStream(tmpfile);
-    DataOutputStream dostream = new DataOutputStream(ostream);
-    TypedBytesWritableOutput out = new TypedBytesWritableOutput(dostream);
-    for (Writable w : writables) {
-      out.write(w);
-    }
-    out.write(tbw);
-    out.write(vector);
-    out.write(map);
-    out.write(r1);
-    dostream.close();
-    ostream.close();
-
-    FileInputStream istream = new FileInputStream(tmpfile);
-    DataInputStream distream = new DataInputStream(istream);
-
-    TypedBytesWritableInput in = new TypedBytesWritableInput(distream);
-    for (Writable w : writables) {
-      assertEquals(w, in.read());
-    }
-
-    assertEquals(tbw.getValue().toString(), in.read().toString());
-
-    assertEquals(ArrayWritable.class, in.readType());
-    ArrayWritable aw = in.readArray();
-    Writable[] writables1 = vector.get(), writables2 = aw.get();
-    assertEquals(writables1.length, writables2.length);
-    for (int i = 0; i < writables1.length; i++) {
-      assertEquals(((Text) writables1[i]).toString(),
-        ((TypedBytesWritable) writables2[i]).getValue());
-    }
-    assertEquals(MapWritable.class, in.readType());
-
-    MapWritable mw = in.readMap();
-    assertEquals(map.entrySet(), mw.entrySet());
-
-    assertEquals(Type.LIST, TypedBytesInput.get(distream).readType());
-    assertEquals(r1.getBoolVal(), TypedBytesInput.get(distream).read());
-    assertEquals(r1.getByteVal(), TypedBytesInput.get(distream).read());
-    assertEquals(r1.getIntVal(), TypedBytesInput.get(distream).read());
-    assertEquals(r1.getLongVal(), TypedBytesInput.get(distream).read());
-    assertEquals(r1.getFloatVal(), TypedBytesInput.get(distream).read());
-    assertEquals(r1.getDoubleVal(), TypedBytesInput.get(distream).read());
-    assertEquals(r1.getStringVal(), TypedBytesInput.get(distream).read());
-    Object prevObj = null, obj = TypedBytesInput.get(distream).read();
-    while (obj != null) {
-      prevObj = obj;
-      obj = TypedBytesInput.get(distream).read();
-    }
-    List recList = (List) prevObj;
-    assertEquals(r0.getStringVal(), recList.get(0));
-
-    distream.close();
-    istream.close();
-  }
-
-}

+ 6 - 0
hadoop-yarn-project/CHANGES.txt

@@ -35,6 +35,9 @@ Release 2.0.4-beta - UNRELEASED
    YARN-377. Use the new StringUtils methods added by HADOOP-9252 and fix
    TestContainersMonitor.  (Chris Nauroth via szetszwo)
 
+    YARN-391. Formatting fixes for LCEResourceHandler classes.
+    (Steve Loughran via sseth)
+
 Release 2.0.3-alpha - 2013-02-06 
 
   INCOMPATIBLE CHANGES
@@ -310,6 +313,9 @@ Release 0.23.7 - UNRELEASED
    YARN-133 Update web services docs for RM clusterMetrics (Ravi Prakash via
    kihwal)
 
+    YARN-249. Capacity Scheduler web page should show list of active users per 
+    queue like it used to (in 1.x) (Ravi Prakash via tgraves)
+
   OPTIMIZATIONS
 
    YARN-357. App submission should not be synchronized (daryn)

+ 21 - 54
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml

@@ -35,66 +35,33 @@
  <build>
    <plugins>
      <plugin>
-        <artifactId>maven-antrun-plugin</artifactId>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
        <executions>
          <execution>
-            <id>create-protobuf-generated-sources-directory</id>
-            <phase>initialize</phase>
-            <configuration>
-              <target>
-                <mkdir dir="target/generated-sources/proto" />
-              </target>
-            </configuration>
-            <goals>
-              <goal>run</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>generate-sources</id>
-            <phase>generate-sources</phase>
-            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-I../../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>--java_out=target/generated-sources/proto</argument>
-                <argument>src/main/proto/yarn_protos.proto</argument>
-                <argument>src/main/proto/yarn_service_protos.proto</argument>
-                <argument>src/main/proto/AM_RM_protocol.proto</argument>
-                <argument>src/main/proto/client_RM_protocol.proto</argument>
-                <argument>src/main/proto/container_manager.proto</argument>
-                <argument>src/main/proto/yarn_server_resourcemanager_service_protos.proto</argument>
-                <argument>src/main/proto/RMAdminProtocol.proto</argument>
-              </arguments>
-            </configuration>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-source</id>
+            <id>compile-protoc</id>
            <phase>generate-sources</phase>
            <goals>
-              <goal>add-source</goal>
+              <goal>protoc</goal>
            </goals>
            <configuration>
-              <sources>
-                <source>target/generated-sources/proto</source>
-              </sources>
+              <imports>
+                <param>${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto</param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>yarn_protos.proto</include>
+                  <include>yarn_service_protos.proto</include>
+                  <include>AM_RM_protocol.proto</include>
+                  <include>client_RM_protocol.proto</include>
+                  <include>container_manager.proto</include>
+                  <include>yarn_server_resourcemanager_service_protos.proto</include>
+                  <include>RMAdminProtocol.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-sources/java</output>
            </configuration>
          </execution>
        </executions>

+ 21 - 55
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml

@@ -107,6 +107,27 @@
              </source>
            </configuration>
          </execution>
+          <execution>
+            <id>compile-protoc</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>protoc</goal>
+            </goals>
+            <configuration>
+              <imports>
+                <param>${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto</param>
+                <param>${basedir}/../hadoop-yarn-api/src/main/proto</param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>yarnprototunnelrpc.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-sources/java</output>
+            </configuration>
+          </execution>
        </executions>
      </plugin>
      <plugin>
@@ -124,18 +145,6 @@
      <plugin>
        <artifactId>maven-antrun-plugin</artifactId>
        <executions>
-          <execution>
-            <id>create-protobuf-generated-sources-directory</id>
-            <phase>initialize</phase>
-            <configuration>
-              <target>
-                <mkdir dir="target/generated-sources/proto" />
-              </target>
-            </configuration>
-            <goals>
-              <goal>run</goal>
-            </goals>
-          </execution>
          <execution>
            <phase>pre-site</phase>
            <goals>
@@ -151,49 +160,6 @@
        </executions>
      </plugin>
 
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>generate-sources</id>
-            <phase>generate-sources</phase>
-            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-I../../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
-                <argument>-I../hadoop-yarn-api/src/main/proto/</argument>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>--java_out=target/generated-sources/proto</argument>
-                <argument>src/main/proto/yarnprototunnelrpc.proto</argument>
-              </arguments>
-            </configuration>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-source</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>add-source</goal>
-            </goals>
-            <configuration>
-              <sources>
-                <source>target/generated-sources/proto</source>
-                <source>target/generated-sources/version</source>
-              </sources>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
    </plugins>
  </build>
</project>

+ 13 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/ResponseInfo.java

@@ -34,19 +34,21 @@ public class ResponseInfo implements Iterable<ResponseInfo.Item> {
    public final String key;
    public final String url;
    public final Object value;
+    public final boolean isRaw;
 
-    Item(String key, String url, Object value) {
+    Item(String key, String url, Object value, boolean isRaw) {
      this.key = key;
      this.url = url;
      this.value = value;
+      this.isRaw = isRaw;
    }
 
-    public static Item of(String key, Object value) {
-      return new Item(key, null, value);
+    public static Item of(String key, Object value, boolean isRaw) {
+      return new Item(key, null, value, isRaw);
    }
 
    public static Item of(String key, String url, Object value) {
-      return new Item(key, url, value);
+      return new Item(key, url, value, false);
    }
  }
 
@@ -71,7 +73,7 @@ public class ResponseInfo implements Iterable<ResponseInfo.Item> {
  }
 
  public ResponseInfo _(String key, Object value) {
-    items.add(Item.of(key, value));
+    items.add(Item.of(key, value, false));
    return this;
  }
 
@@ -80,6 +82,12 @@ public class ResponseInfo implements Iterable<ResponseInfo.Item> {
    return this;
  }
 
+  //Value is raw HTML and shouldn't be escaped
+  public ResponseInfo _r(String key, Object value) {
+    items.add(Item.of(key, value, true));
+    return this;
+  }
+
  public void clear() {
    items.clear();
  }

+ 5 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/InfoBlock.java

@@ -46,7 +46,11 @@ public class InfoBlock extends HtmlBlock {
          th(item.key);
      String value = String.valueOf(item.value);
      if (item.url == null) {
-        tr.td(value);
+        if (!item.isRaw) {
+          tr.td(value);
+        } else {
+          tr.td()._r(value)._();
+        }
      } else {
        tr.
          td().

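Taken together, the ResponseInfo and InfoBlock changes above add an unescaped rendering path: items recorded with _r(...) are emitted as raw HTML by InfoBlock, while items recorded with _(...) are still escaped. A minimal usage sketch (invented values; info(...) and html are the helpers an HtmlBlock subclass already has, as used by the CapacitySchedulerPage change later in this commit):

  // Inside an HtmlBlock.render(Block html) implementation -- illustrative only.
  ResponseInfo ri = info("'default' Queue Status").
      _("Queue State:", "RUNNING").                    // escaped when rendered
      _r("Active users: ",                             // raw HTML, rendered as-is
         "alice &lt;Memory: 1024 (50.00%)&gt;<br style='display:block'>");
  html._(InfoBlock.class);                             // InfoBlock honors Item.isRaw
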
+ 19 - 45
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml

@@ -48,18 +48,6 @@
      <plugin>
        <artifactId>maven-antrun-plugin</artifactId>
        <executions>
-          <execution>
-            <id>create-protobuf-generated-sources-directory</id>
-            <phase>initialize</phase>
-            <configuration>
-              <target>
-                <mkdir dir="target/generated-sources/proto" />
-              </target>
-            </configuration>
-            <goals>
-              <goal>run</goal>
-            </goals>
-          </execution>
          <execution>
            <phase>pre-site</phase>
            <goals>
@@ -75,45 +63,31 @@
      </plugin>
 
      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>generate-sources</id>
-            <phase>generate-sources</phase>
-            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-I../../../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
-                <argument>-I../../hadoop-yarn-api/src/main/proto/</argument>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>--java_out=target/generated-sources/proto</argument>
-                <argument>src/main/proto/yarn_server_common_protos.proto</argument>
-                <argument>src/main/proto/yarn_server_common_service_protos.proto</argument>
-                <argument>src/main/proto/ResourceTracker.proto</argument>
-              </arguments>
-            </configuration>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
        <executions>
          <execution>
-            <id>add-source</id>
+            <id>compile-protoc</id>
            <phase>generate-sources</phase>
            <goals>
-              <goal>add-source</goal>
+              <goal>protoc</goal>
            </goals>
            <configuration>
-              <sources>
-                <source>target/generated-sources/proto</source>
-              </sources>
+              <imports>
+                <param>${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
+                <param>${basedir}/../../hadoop-yarn-api/src/main/proto</param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>yarn_server_common_protos.proto</include>
+                  <include>yarn_server_common_service_protos.proto</include>
+                  <include>ResourceTracker.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-sources/java</output>
            </configuration>
          </execution>
        </executions>

+ 17 - 57
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml

@@ -151,69 +151,29 @@
      </plugin>
 
      <plugin>
-        <artifactId>maven-antrun-plugin</artifactId>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-maven-plugins</artifactId>
        <executions>
          <execution>
-            <id>create-protobuf-generated-sources-directory</id>
-            <phase>initialize</phase>
-            <configuration>
-              <target>
-                <mkdir dir="target/generated-sources/proto" />
-              </target>
-            </configuration>
-            <goals>
-              <goal>run</goal>
-            </goals>
-          </execution>
-          <execution>
-            <id>compile</id>
-            <phase>generate-sources</phase>
-            <goals>
-              <goal>run</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>exec-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>generate-sources</id>
-            <phase>generate-sources</phase>
-            <configuration>
-              <executable>protoc</executable>
-              <arguments>
-                <argument>-I../../../../hadoop-common-project/hadoop-common/src/main/proto/</argument>
-                <argument>-I../../hadoop-yarn-api/src/main/proto/</argument>
-                <argument>-Isrc/main/proto/</argument>
-                <argument>--java_out=target/generated-sources/proto</argument>
-                <argument>src/main/proto/yarn_server_nodemanager_service_protos.proto</argument>
-                <argument>src/main/proto/LocalizationProtocol.proto</argument>
-              </arguments>
-            </configuration>
-            <goals>
-              <goal>exec</goal>
-            </goals>
-          </execution>
-        </executions>
-      </plugin>
-
-      <plugin>
-        <groupId>org.codehaus.mojo</groupId>
-        <artifactId>build-helper-maven-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>add-source</id>
+            <id>compile-protoc</id>
            <phase>generate-sources</phase>
            <goals>
-              <goal>add-source</goal>
+              <goal>protoc</goal>
            </goals>
            <configuration>
-              <sources>
-                <source>target/generated-sources/proto</source>
-              </sources>
+              <imports>
+                <param>${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
+                <param>${basedir}/../../hadoop-yarn-api/src/main/proto</param>
+                <param>${basedir}/src/main/proto</param>
+              </imports>
+              <source>
+                <directory>${basedir}/src/main/proto</directory>
+                <includes>
+                  <include>yarn_server_nodemanager_service_protos.proto</include>
+                  <include>LocalizationProtocol.proto</include>
+                </includes>
+              </source>
+              <output>${project.build.directory}/generated-sources/java</output>
            </configuration>
          </execution>
        </executions>

+ 16 - 16
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/CgroupsLCEResourcesHandler.java

@@ -80,17 +80,17 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
            NM_LINUX_CONTAINER_CGROUPS_MOUNT, false);
    this.cgroupMountPath = conf.get(YarnConfiguration.
            NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, null);
-	
+
    // remove extra /'s at end or start of cgroupPrefix
    if (cgroupPrefix.charAt(0) == '/') {
-    	cgroupPrefix = cgroupPrefix.substring(1);
+      cgroupPrefix = cgroupPrefix.substring(1);
    }
 
    int len = cgroupPrefix.length();
    if (cgroupPrefix.charAt(len - 1) == '/') {
-    	cgroupPrefix = cgroupPrefix.substring(0, len - 1);
+      cgroupPrefix = cgroupPrefix.substring(0, len - 1);
    }
-   
+  
    // mount cgroups if requested
    if (cgroupMount && cgroupMountPath != null) {
      ArrayList<String> cgroupKVs = new ArrayList<String>();
@@ -98,14 +98,14 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
                    CONTROLLER_CPU);
      lce.mountCgroups(cgroupKVs, cgroupPrefix);
    }
-    
+
    initializeControllerPaths();
  }
 
 
  boolean isCpuWeightEnabled() {
    return this.cpuWeightEnabled;
-  }	
+  }
 
  /*
   * Next four functions are for an individual cgroup.
@@ -155,7 +155,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
        }
      }
    }
-  }	
+  }
 
  private void deleteCgroup(String controller, String groupName) {
    String path = pathForCgroup(controller, groupName);
@@ -165,7 +165,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
    if (! new File(path).delete()) {
      LOG.warn("Unable to delete cgroup at: " + path);
    }
-  }	
+  }
 
  /*
   * Next three functions operate on all the resources we are enforcing.
@@ -178,7 +178,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
  private void setupLimits(ContainerId containerId,
                           Resource containerResource) throws IOException {
    String containerName = containerId.toString();
-    
+
    if (isCpuWeightEnabled()) {
      createCgroup(CONTROLLER_CPU, containerName);
      updateCgroup(CONTROLLER_CPU, containerName, "shares",
@@ -202,7 +202,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
 
    if (isCpuWeightEnabled()) {
      deleteCgroup(CONTROLLER_CPU, containerName);
-    }	
+    }
  }
 
  /*
@@ -222,7 +222,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
    String containerName = containerId.toString();
 
    StringBuilder sb = new StringBuilder("cgroups=");
-    
+
    if (isCpuWeightEnabled()) {
      sb.append(pathForCgroup(CONTROLLER_CPU, containerName) + "/cgroup.procs");
      sb.append(",");
@@ -231,7 +231,7 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
    if (sb.charAt(sb.length() - 1) == ',') {
      sb.deleteCharAt(sb.length() - 1);
    }
-    
+
    return sb.toString();
  }
 
@@ -255,8 +255,8 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
    BufferedReader in = null;
 
    try {
-      in = new BufferedReader(new FileReader(new File(MTAB_FILE)));	
-    	
+      in = new BufferedReader(new FileReader(new File(MTAB_FILE)));
+
      for (String str = in.readLine(); str != null;
          str = in.readLine()) {
        Matcher m = MTAB_FILE_FORMAT.matcher(str);
@@ -316,6 +316,6 @@ public class CgroupsLCEResourcesHandler implements LCEResourcesHandler {
    } else {
      throw new IOException("Not able to enforce cpu weights; cannot find "
          + "cgroup for cpu controller in " + MTAB_FILE);
-    }	
+    }
  }
-}
+}

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/util/DefaultLCEResourcesHandler.java

@@ -33,7 +33,7 @@ public class DefaultLCEResourcesHandler implements LCEResourcesHandler {
  private Configuration conf;
  
  public DefaultLCEResourcesHandler() {
-  }	
+  }
  
  public void setConf(Configuration conf) {
        this.conf = conf;
@@ -42,7 +42,7 @@ public class DefaultLCEResourcesHandler implements LCEResourcesHandler {
  @Override
  public Configuration getConf() {
    return  conf;
-  }	
+  }
  
  public void init(LinuxContainerExecutor lce) {
  }

+ 13 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java

@@ -571,6 +571,19 @@ public class LeafQueue implements CSQueue {
    return user;
  }
 
+  /**
+   * @return an ArrayList of UserInfo objects who are active in this queue
+   */
+  public synchronized ArrayList<UserInfo> getUsers() {
+    ArrayList<UserInfo> usersToReturn = new ArrayList<UserInfo>();
+    for (Map.Entry<String, User> entry: users.entrySet()) {
+      usersToReturn.add(new UserInfo(entry.getKey(), Resources.clone(
+        entry.getValue().consumed), entry.getValue().getActiveApplications(),
+        entry.getValue().getPendingApplications()));
+    }
+    return usersToReturn;
+  }
+
  @Override
  public synchronized void reinitialize(
      CSQueue newlyParsedQueue, Resource clusterResource) 

+ 60 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/UserInfo.java

@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class UserInfo {
+  protected String  username;
+  protected ResourceInfo resourcesUsed;
+  protected int numPendingApplications;
+  protected int numActiveApplications;
+
+  UserInfo() {}
+
+  UserInfo(String username, Resource resUsed, int activeApps, int pendingApps) {
+    this.username = username;
+    this.resourcesUsed = new ResourceInfo(resUsed);
+    this.numActiveApplications = activeApps;
+    this.numPendingApplications = pendingApps;
+  }
+
+  public String getUsername() {
+    return username;
+  }
+
+  public ResourceInfo getResourcesUsed() {
+    return resourcesUsed;
+  }
+
+  public int getNumPendingApplications() {
+    return numPendingApplications;
+  }
+
+  public int getNumActiveApplications() {
+    return numActiveApplications;
+  }
+}

+ 39 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/CapacitySchedulerPage.java

@@ -22,12 +22,15 @@ import static org.apache.hadoop.yarn.util.StringHelper.join;
 
import java.util.ArrayList;
 
+import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerLeafQueueInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
import org.apache.hadoop.yarn.webapp.ResponseInfo;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
@@ -63,8 +66,42 @@ class CapacitySchedulerPage extends RmView {
      lqinfo = (CapacitySchedulerLeafQueueInfo) info.qinfo;
    }
 
+    //Return a string describing one resource as a percentage of another
+    private String getPercentage(ResourceInfo numerator, ResourceInfo denominator) {
+      StringBuilder percentString = new StringBuilder("Memory: ");
+      if (numerator != null) {
+        percentString.append(numerator.getMemory());
+      }
+      if (denominator.getMemory() != 0) {
+        percentString.append(" (<span title='of used resources in this queue'>")
+          .append(StringUtils.format("%.2f", numerator.getMemory() * 100.0 /
+            denominator.getMemory()) + "%</span>)");
+      }
+      percentString.append(", vCores: ");
+      if (numerator != null) {
+        percentString.append(numerator.getvCores());
+      }
+      if (denominator.getvCores() != 0) {
+        percentString.append(" (<span title='of used resources in this queue'>")
+          .append(StringUtils.format("%.2f", numerator.getvCores() * 100.0 /
+          denominator.getvCores()) + "%</span>)");
+      }
+      return percentString.toString();
+    }
+
    @Override
    protected void render(Block html) {
+      StringBuilder activeUserList = new StringBuilder("");
+      ResourceInfo usedResources = lqinfo.getResourcesUsed();
+      ArrayList<UserInfo> users = lqinfo.getUsers().getUsersList();
+      for (UserInfo entry: users) {
+        activeUserList.append(entry.getUsername()).append(" &lt;")
+          .append(getPercentage(entry.getResourcesUsed(), usedResources))
+          .append(", Active Apps: " + entry.getNumActiveApplications())
+          .append(", Pending Apps: " + entry.getNumPendingApplications())
+          .append("&gt;<br style='display:block'>"); //Force line break
+      }
+
      ResponseInfo ri = info("\'" + lqinfo.getQueuePath().substring(5) + "\' Queue Status").
          _("Queue State:", lqinfo.getQueueState()).
          _("Used Capacity:", percent(lqinfo.getUsedCapacity() / 100)).
@@ -81,7 +118,8 @@ class CapacitySchedulerPage extends RmView {
          _("Configured Capacity:", percent(lqinfo.getCapacity() / 100)).
          _("Configured Max Capacity:", percent(lqinfo.getMaxCapacity() / 100)).
          _("Configured Minimum User Limit Percent:", Integer.toString(lqinfo.getUserLimit()) + "%").
-          _("Configured User Limit Factor:", String.format("%.1f", lqinfo.getUserLimitFactor()));
+          _("Configured User Limit Factor:", String.format("%.1f", lqinfo.getUserLimitFactor())).
+          _r("Active users: ", activeUserList.toString());
 
      html._(InfoBlock.class);
 

+ 5 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java

@@ -30,6 +30,7 @@ import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;
 
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
@@ -42,9 +43,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsIn
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ResourceInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UsersInfo;
import org.apache.hadoop.yarn.webapp.RemoteExceptionData;
 
@Singleton
@@ -61,7 +64,8 @@ public class JAXBContextResolver implements ContextResolver<JAXBContext> {
      SchedulerTypeInfo.class, NodeInfo.class, UserMetricsInfo.class,
      CapacitySchedulerInfo.class, ClusterMetricsInfo.class,
      SchedulerInfo.class, AppsInfo.class, NodesInfo.class,
-      RemoteExceptionData.class, CapacitySchedulerQueueInfoList.class};
+      RemoteExceptionData.class, CapacitySchedulerQueueInfoList.class,
+      ResourceInfo.class, UsersInfo.class, UserInfo.class};
 
  public JAXBContextResolver() throws Exception {
    this.types = new HashSet<Class>(Arrays.asList(cTypes));

+ 7 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerLeafQueueInfo.java

@@ -35,6 +35,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
  protected int maxActiveApplications;
  protected int maxActiveApplicationsPerUser;
  protected int userLimit;
+  protected UsersInfo users; // To add another level in the XML
  protected float userLimitFactor;
 
  CapacitySchedulerLeafQueueInfo() {
@@ -50,6 +51,7 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
    maxActiveApplications = q.getMaximumActiveApplications();
    maxActiveApplicationsPerUser = q.getMaximumActiveApplicationsPerUser();
    userLimit = q.getUserLimit();
+    users = new UsersInfo(q.getUsers());
    userLimitFactor = q.getUserLimitFactor();
  }
 
@@ -85,6 +87,11 @@ public class CapacitySchedulerLeafQueueInfo extends CapacitySchedulerQueueInfo {
    return userLimit;
  }
 
+  //Placing here because of JERSEY-1199
+  public UsersInfo getUsers() {
+    return users;
+  }
+
  public float getUserLimitFactor() {
    return userLimitFactor;
  }

+ 6 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java

@@ -48,6 +48,7 @@ public class CapacitySchedulerQueueInfo {
  protected String queueName;
  protected QueueState state;
  protected CapacitySchedulerQueueInfoList queues;
+  protected ResourceInfo resourcesUsed;
 
  CapacitySchedulerQueueInfo() {
  };
@@ -69,6 +70,7 @@ public class CapacitySchedulerQueueInfo {
    usedResources = q.getUsedResources().toString();
    queueName = q.getQueueName();
    state = q.getState();
+    resourcesUsed = new ResourceInfo(q.getUsedResources());
  }
 
  public float getCapacity() {
@@ -119,6 +121,10 @@ public class CapacitySchedulerQueueInfo {
    return this.queues;
  }
 
+  public ResourceInfo getResourcesUsed() {
+    return resourcesUsed;
+  }
+
  /**
   * Limit a value to a specified range.
   * @param val the value to be capped

+ 30 - 5
hadoop-common-project/hadoop-common/src/test/ddl/buffer.jr → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java

@@ -15,9 +15,34 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-module org.apache.hadoop.record {
-    class RecBuffer {
-        buffer data;
-    }
-}
 
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.yarn.api.records.Resource;
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class ResourceInfo {
+  int memory;
+  int vCores;
+  
+  public ResourceInfo() { 
+  }
+
+  public ResourceInfo(Resource res) {
+    memory = res.getMemory();
+    vCores = res.getVirtualCores();
+  }
+
+  public int getMemory() {
+    return memory;
+  }
+
+  public int getvCores() {
+    return vCores;
+  }
+}

+ 28 - 5
hadoop-common-project/hadoop-common/src/test/ddl/int.jr → hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UsersInfo.java

@@ -15,9 +15,32 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
-module org.apache.hadoop.record {
-    class RecInt {
-        int data;
-    }
-}
 
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.ArrayList;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlElement;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo;
+
+@XmlRootElement
+@XmlAccessorType(XmlAccessType.FIELD)
+public class UsersInfo {
+  @XmlElement(name="user")
+  protected ArrayList<UserInfo> usersList = new ArrayList<UserInfo>();
+
+  public UsersInfo() {
+  }
+
+  public UsersInfo(ArrayList<UserInfo> usersList) {
+    this.usersList = usersList;
+  }
+
+  public ArrayList<UserInfo> getUsersList() {
+    return usersList;
+  }
+}

+ 40 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java

@@ -18,16 +18,19 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager;
 
+import java.security.PrivilegedAction;
 import java.util.Map;
 
 import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.ClientRMProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -37,6 +40,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
 import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
@@ -118,21 +122,27 @@ public class MockRM extends ResourceManager {
   }
 
   public RMApp submitApp(int masterMemory) throws Exception {
-    return submitApp(masterMemory, "", "");
+    return submitApp(masterMemory, "", UserGroupInformation.getCurrentUser()
+      .getShortUserName());
   }
 
   // client
   public RMApp submitApp(int masterMemory, String name, String user) throws Exception {
-    return submitApp(masterMemory, name, user, null, false);
+    return submitApp(masterMemory, name, user, null, false, null);
   }
   
   public RMApp submitApp(int masterMemory, String name, String user,
       Map<ApplicationAccessType, String> acls) throws Exception {
-    return submitApp(masterMemory, name, user, acls, false);
+    return submitApp(masterMemory, name, user, acls, false, null);
   }  
 
   public RMApp submitApp(int masterMemory, String name, String user,
-      Map<ApplicationAccessType, String> acls, boolean unmanaged) throws Exception {
+      Map<ApplicationAccessType, String> acls, String queue) throws Exception {
+    return submitApp(masterMemory, name, user, acls, false, queue);
+  }  
+
+  public RMApp submitApp(int masterMemory, String name, String user,
+      Map<ApplicationAccessType, String> acls, boolean unmanaged, String queue) throws Exception {
     ClientRMProtocol client = getClientRMService();
     GetNewApplicationResponse resp = client.getNewApplication(Records
         .newRecord(GetNewApplicationRequest.class));
@@ -148,6 +158,9 @@ public class MockRM extends ResourceManager {
     if(unmanaged) {
       sub.setUnmanagedAM(true);
     }
+    if (queue != null) {
+      sub.setQueue(queue);
+    }
     ContainerLaunchContext clc = Records
         .newRecord(ContainerLaunchContext.class);
     Resource capability = Records.newRecord(Resource.class);
@@ -157,7 +170,29 @@ public class MockRM extends ResourceManager {
     sub.setAMContainerSpec(clc);
     req.setApplicationSubmissionContext(sub);
 
-    client.submitApplication(req);
+    UserGroupInformation fakeUser =
+      UserGroupInformation.createUserForTesting(user, new String[] {"someGroup"});
+    PrivilegedAction<SubmitApplicationResponse> action =
+      new PrivilegedAction<SubmitApplicationResponse>() {
+      ClientRMProtocol client;
+      SubmitApplicationRequest req;
+      @Override
+      public SubmitApplicationResponse run() {
+        try {
+          return client.submitApplication(req);
+        } catch (YarnRemoteException e) {
+          e.printStackTrace();
+        }
+        return null;
+      }
+      PrivilegedAction<SubmitApplicationResponse> setClientReq(
+        ClientRMProtocol client, SubmitApplicationRequest req) {
+        this.client = client;
+        this.req = req;
+        return this;
+      }
+    }.setClientReq(client, req);
+    fakeUser.doAs(action);
     // make sure app is immediately available after submit
     waitForState(appId, RMAppState.ACCEPTED);
     return getRMContext().getRMApps().get(appId);

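The substantive change in submitApp is that the actual submitApplication call now runs inside doAs() for a synthetic test user, so ClientRMService sees the intended submitter (and its ACLs) rather than the JVM user. A stand-alone sketch of that pattern, assuming only hadoop-common on the classpath; the action body is illustrative, not the RM submission itself:

import java.security.PrivilegedAction;
import org.apache.hadoop.security.UserGroupInformation;

public class DoAsSketch {
  public static void main(String[] args) throws Exception {
    UserGroupInformation fakeUser =
        UserGroupInformation.createUserForTesting("user1", new String[] {"someGroup"});
    String runAs = fakeUser.doAs(new PrivilegedAction<String>() {
      @Override
      public String run() {
        // Inside run(), getCurrentUser() reports the impersonated test user,
        // which is what the server side sees on the call made here.
        try {
          return UserGroupInformation.getCurrentUser().getShortUserName();
        } catch (java.io.IOException e) {
          throw new RuntimeException(e);
        }
      }
    });
    System.out.println("submitted as: " + runAs);
  }
}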
+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java

@@ -152,7 +152,7 @@ public class TestRMRestart {
         .getApplicationId());
     
     // create unmanaged app
-    RMApp appUnmanaged = rm1.submitApp(200, "", "", null, true);
+    RMApp appUnmanaged = rm1.submitApp(200, "someApp", "someUser", null, true, null);
     ApplicationAttemptId unmanagedAttemptId = 
                         appUnmanaged.getCurrentAppAttempt().getAppAttemptId();
     // assert appUnmanaged info is saved

+ 146 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java

@@ -27,10 +27,12 @@ import javax.ws.rs.core.MediaType;
 import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 
+import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
+import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
@@ -44,6 +46,7 @@ import org.junit.Before;
 import org.junit.Test;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
+import org.w3c.dom.Node;
 import org.w3c.dom.NodeList;
 import org.xml.sax.InputSource;
 
@@ -355,10 +358,10 @@ public class TestRMWebServicesCapacitySched extends JerseyTest {
   private void verifySubQueue(JSONObject info, String q, 
       float parentAbsCapacity, float parentAbsMaxCapacity)
       throws JSONException, Exception {
-    int numExpectedElements = 11;
+    int numExpectedElements = 12;
     boolean isParentQueue = true;
     if (!info.has("queues")) {
-      numExpectedElements = 20;
+      numExpectedElements = 22;
       isParentQueue = false;
     }
     assertEquals("incorrect number of elements", numExpectedElements, info.length());
@@ -397,6 +400,8 @@ public class TestRMWebServicesCapacitySched extends JerseyTest {
       lqi.userLimit = info.getInt("userLimit");
       lqi.userLimitFactor = (float) info.getDouble("userLimitFactor");
       verifyLeafQueueGeneric(q, lqi);
+      // resourcesUsed and users (per-user resources used) are checked in
+      // testPerUserResource()
     }
   }
 
@@ -464,4 +469,143 @@ public class TestRMWebServicesCapacitySched extends JerseyTest {
     assertEquals("userLimitFactor doesn't match",
         csConf.getUserLimitFactor(q), info.userLimitFactor, 1e-3f);
   }
+
+  //Return a child Node of node with the tagname or null if none exists 
+  private Node getChildNodeByName(Node node, String tagname) {
+    NodeList nodeList = node.getChildNodes();
+    for (int i=0; i < nodeList.getLength(); ++i) {
+      if (nodeList.item(i).getNodeName().equals(tagname)) {
+        return nodeList.item(i);
+      }
+    }
+    return null;
+  }
+
+  /**
+   * Test per user resources and resourcesUsed elements in the web services XML
+   * @throws Exception
+   */
+  @Test
+  public void testPerUserResourcesXML() throws Exception {
+    //Start RM so that it accepts app submissions
+    rm.start();
+    try {
+      rm.submitApp(10, "app1", "user1", null, "b1");
+      rm.submitApp(20, "app2", "user2", null, "b1");
+
+      //Get the XML from ws/v1/cluster/scheduler
+      WebResource r = resource();
+      ClientResponse response = r.path("ws/v1/cluster/scheduler")
+        .accept(MediaType.APPLICATION_XML).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_XML_TYPE, response.getType());
+      String xml = response.getEntity(String.class);
+      DocumentBuilder db = DocumentBuilderFactory.newInstance()
+        .newDocumentBuilder();
+      InputSource is = new InputSource();
+      is.setCharacterStream(new StringReader(xml));
+      //Parse the XML we got
+      Document dom = db.parse(is);
+
+      //Get all users elements (1 for each leaf queue)
+      NodeList allUsers = dom.getElementsByTagName("users");
+      for (int i=0; i<allUsers.getLength(); ++i) {
+        Node perUserResources = allUsers.item(i);
+        String queueName = getChildNodeByName(perUserResources
+          .getParentNode(), "queueName").getTextContent();
+        if (queueName.equals("b1")) {
+          //b1 should have two users (user1 and user2) which submitted jobs
+          assertEquals(2, perUserResources.getChildNodes().getLength());
+          NodeList users = perUserResources.getChildNodes();
+          for (int j=0; j<users.getLength(); ++j) {
+            Node user = users.item(j);
+            String username = getChildNodeByName(user, "username")
+              .getTextContent(); 
+            assertTrue(username.equals("user1") || username.equals("user2"));
+            //Should be a parsable integer
+            Integer.parseInt(getChildNodeByName(getChildNodeByName(user,
+              "resourcesUsed"), "memory").getTextContent());
+            Integer.parseInt(getChildNodeByName(user, "numActiveApplications")
+              .getTextContent());
+            Integer.parseInt(getChildNodeByName(user, "numPendingApplications")
+                .getTextContent());
+          }
+        } else {
+        //Queues other than b1 should have 0 users
+          assertEquals(0, perUserResources.getChildNodes().getLength());
+        }
+      }
+      NodeList allResourcesUsed = dom.getElementsByTagName("resourcesUsed");
+      for (int i=0; i<allResourcesUsed.getLength(); ++i) {
+        Node resourcesUsed = allResourcesUsed.item(i);
+        Integer.parseInt(getChildNodeByName(resourcesUsed, "memory")
+            .getTextContent());
+        Integer.parseInt(getChildNodeByName(resourcesUsed, "vCores")
+              .getTextContent());
+      }
+    } finally {
+      rm.stop();
+    }
+  }
+
+  private void checkResourcesUsed(JSONObject queue) throws JSONException {
+    queue.getJSONObject("resourcesUsed").getInt("memory");
+    queue.getJSONObject("resourcesUsed").getInt("vCores");
+  }
+
+  //Also checks resourcesUsed
+  private JSONObject getSubQueue(JSONObject queue, String subQueue)
+    throws JSONException {
+    JSONArray queues = queue.getJSONObject("queues").getJSONArray("queue");
+    for (int i=0; i<queues.length(); ++i) {
+      checkResourcesUsed(queues.getJSONObject(i));
+      if (queues.getJSONObject(i).getString("queueName").equals(subQueue) ) {
+        return queues.getJSONObject(i);
+      }
+    }
+    return null;
+  }
+
+  @Test
+  public void testPerUserResourcesJSON() throws Exception {
+    //Start RM so that it accepts app submissions
+    rm.start();
+    try {
+      rm.submitApp(10, "app1", "user1", null, "b1");
+      rm.submitApp(20, "app2", "user2", null, "b1");
+
+      //Get JSON
+      WebResource r = resource();
+      ClientResponse response = r.path("ws").path("v1").path("cluster")
+          .path("scheduler/").accept(MediaType.APPLICATION_JSON)
+          .get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+
+      JSONObject schedulerInfo = json.getJSONObject("scheduler").getJSONObject(
+        "schedulerInfo");
+      JSONObject b1 = getSubQueue(getSubQueue(schedulerInfo, "b"), "b1");
+      //Check users user1 and user2 exist in b1
+      JSONArray users = b1.getJSONObject("users").getJSONArray("user");
+      for (int i=0; i<2; ++i) {
+        JSONObject user = users.getJSONObject(i);
+        assertTrue("User isn't user1 or user2",user.getString("username")
+          .equals("user1") || user.getString("username").equals("user2"));
+        user.getInt("numActiveApplications");
+        user.getInt("numPendingApplications");
+        checkResourcesUsed(user);
+      }
+    } finally {
+      rm.stop();
+    }
+  }
+
+
+  @Test
+  public void testResourceInfo() {
+    Resource res = Resources.createResource(10, 1);
+    // If we add a new resource (e.g disks), then
+    // CapacitySchedulerPage and these RM WebServices + docs need to be updated
+    // eg. ResourceInfo
+    assertEquals("<memory:10, vCores:1>", res.toString());
+  }
 }

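The XML assertions above boil down to a small DOM walk: locate a resourcesUsed (or user) element and read its integer children. A self-contained sketch of that walk, with an inline XML string standing in for a scheduler response fragment:

import java.io.StringReader;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;
import org.xml.sax.InputSource;

public class ResourcesUsedXmlSketch {
  // Return the first child of node with the given tag name, or null.
  static Node childByName(Node node, String tag) {
    NodeList kids = node.getChildNodes();
    for (int i = 0; i < kids.getLength(); ++i) {
      if (kids.item(i).getNodeName().equals(tag)) {
        return kids.item(i);
      }
    }
    return null;
  }

  public static void main(String[] args) throws Exception {
    String xml = "<queue><queueName>b1</queueName>"
        + "<resourcesUsed><memory>0</memory><vCores>0</vCores></resourcesUsed></queue>";
    DocumentBuilder db = DocumentBuilderFactory.newInstance().newDocumentBuilder();
    Document dom = db.parse(new InputSource(new StringReader(xml)));
    Node used = dom.getElementsByTagName("resourcesUsed").item(0);
    int memory = Integer.parseInt(childByName(used, "memory").getTextContent());
    int vCores = Integer.parseInt(childByName(used, "vCores").getTextContent());
    System.out.println("memory=" + memory + " vCores=" + vCores);
  }
}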
+ 422 - 270
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm

@@ -378,6 +378,8 @@ ResourceManager REST API's.
 *---------------+--------------+-------------------------------+
 | queues | array of queues(JSON)/zero or more queue objects(XML) | A collection of sub-queue information|
 *---------------+--------------+-------------------------------+
+| resourcesUsed | A single resource object | The total amount of resources used by this queue |
+*---------------+--------------+-------------------------------+
 
 ** Elements of the queues object for a Leaf queue - contains all elements in parent plus the following:
 
@@ -404,6 +406,32 @@ ResourceManager REST API's.
 *---------------+--------------+-------------------------------+
 | userLimitFactor | float | The user limit factor set in the configuration |
 *---------------+--------------+-------------------------------+
+| users | array of users(JSON)/zero or more user objects(XML) | A collection of user objects containing resources used |
+*---------------+--------------+-------------------------------+
+
+** Elements of the user object for users:
+
+*---------------+--------------+-------------------------------+
+|| Item         || Data Type   || Description                   |
+*---------------+--------------+-------------------------------+
+| username | String | The username of the user using the resources |
+*---------------+--------------+-------------------------------+
+| resourcesUsed | A single resource object | The amount of resources used by the user in this queue |
+*---------------+--------------+-------------------------------+
+| numActiveApplications | int | The number of active applications for this user in this queue |
+*---------------+--------------+-------------------------------+
+| numPendingApplications | int | The number of pending applications for this user in this queue |
+*---------------+--------------+-------------------------------+
+
+** Elements of the resource object for resourcesUsed in user and queues:
+
+*---------------+--------------+-------------------------------+
+|| Item         || Data Type   || Description                   |
+*---------------+--------------+-------------------------------+
+| memory | int | The amount of memory used (in MB) |
+*---------------+--------------+-------------------------------+
+| vCores | int | The number of virtual cores |
+*---------------+--------------+-------------------------------+
 
 
 *** Response Examples
 
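A sketch of fetching the payload shown in the response examples below from a running ResourceManager; the host and port are assumptions (the default RM web address), and either application/json or application/xml can be requested:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class SchedulerInfoFetch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8088/ws/v1/cluster/scheduler");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json"); // or application/xml
    BufferedReader in =
        new BufferedReader(new InputStreamReader(conn.getInputStream(), "UTF-8"));
    StringBuilder body = new StringBuilder();
    for (String line; (line = in.readLine()) != null; ) {
      body.append(line).append('\n');
    }
    in.close();
    // The resourcesUsed and users elements described above appear per queue.
    System.out.println(body);
  }
}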
@@ -428,199 +456,262 @@ ResourceManager REST API's.
 
 +---+
 {
-   "scheduler" : {
-      "schedulerInfo" : {
-         "queueName" : "root",
-         "maxCapacity" : 100,
-         "type" : "capacityScheduler",
-         "queues" : {
-            "queue" : [
-               {
-                  "numPendingApplications" : 0,
-                  "queueName" : "default",
-                  "userLimitFactor" : 1,
-                  "maxApplications" : 1,
-                  "usedCapacity" : 0,
-                  "numContainers" : 0,
-                  "state" : "RUNNING",
-                  "maxCapacity" : 90,
-                  "numApplications" : 0,
-                  "usedResources" : "memory: 0",
-                  "absoluteMaxCapacity" : 90,
-                  "maxActiveApplications" : 1,
-                  "numActiveApplications" : 0,
-                  "absoluteUsedCapacity" : 0,
-                  "userLimit" : 100,
-                  "absoluteCapacity" : 70,
-                  "maxActiveApplicationsPerUser" : 1,
-                  "capacity" : 70,
-                  "type" : "capacitySchedulerLeafQueueInfo",
-                  "maxApplicationsPerUser" : 1
-               },
-               {
-                  "queueName" : "test",
-                  "absoluteCapacity" : 20,
-                  "usedCapacity" : 0,
-                  "capacity" : 20,
-                  "state" : "RUNNING",
-                  "maxCapacity" : 100,
-                  "numApplications" : 0,
-                  "usedResources" : "memory: 0",
-                  "absoluteMaxCapacity" : 100,
-                  "queues" : {
-                     "queue" : [
-                        {
-                           "queueName" : "a1",
-                           "absoluteCapacity" : 12,
-                           "usedCapacity" : 0,
-                           "capacity" : 60.000004,
-                           "state" : "RUNNING",
-                           "maxCapacity" : 100,
-                           "numApplications" : 0,
-                           "usedResources" : "memory: 0",
-                           "absoluteMaxCapacity" : 100,
-                           "queues" : {
-                              "queue" : [
-                                 {
-                                    "numPendingApplications" : 0,
-                                    "queueName" : "a11",
-                                    "userLimitFactor" : 1,
-                                    "maxApplications" : 0,
-                                    "usedCapacity" : 0,
-                                    "numContainers" : 0,
-                                    "state" : "RUNNING",
-                                    "maxCapacity" : 100,
-                                    "numApplications" : 0,
-                                    "usedResources" : "memory: 0",
-                                    "absoluteMaxCapacity" : 100,
-                                    "maxActiveApplications" : 1,
-                                    "numActiveApplications" : 0,
-                                    "absoluteUsedCapacity" : 0,
-                                    "userLimit" : 100,
-                                    "absoluteCapacity" : 10.200001,
-                                    "maxActiveApplicationsPerUser" : 1,
-                                    "capacity" : 85,
-                                    "type" : "capacitySchedulerLeafQueueInfo",
-                                    "maxApplicationsPerUser" : 0
-                                 },
-                                 {
-                                    "numPendingApplications" : 0,
-                                    "queueName" : "a12",
-                                    "userLimitFactor" : 1,
-                                    "maxApplications" : 0,
-                                    "usedCapacity" : 0,
-                                    "numContainers" : 0,
-                                    "state" : "RUNNING",
-                                    "maxCapacity" : 100,
-                                    "numApplications" : 0,
-                                    "usedResources" : "memory: 0",
-                                    "absoluteMaxCapacity" : 100,
-                                    "maxActiveApplications" : 1,
-                                    "numActiveApplications" : 0,
-                                    "absoluteUsedCapacity" : 0,
-                                    "userLimit" : 100,
-                                    "absoluteCapacity" : 1.8000001,
-                                    "maxActiveApplicationsPerUser" : 1,
-                                    "capacity" : 15.000001,
-                                    "type" : "capacitySchedulerLeafQueueInfo",
-                                    "maxApplicationsPerUser" : 0
-                                 }
-                              ]
-                           },
-                           "absoluteUsedCapacity" : 0
-                        },
-                        {
-                           "numPendingApplications" : 0,
-                           "queueName" : "a2",
-                           "userLimitFactor" : 1,
-                           "maxApplications" : 0,
-                           "usedCapacity" : 0,
-                           "numContainers" : 0,
-                           "state" : "RUNNING",
-                           "maxCapacity" : 100,
-                           "numApplications" : 0,
-                           "usedResources" : "memory: 0",
-                           "absoluteMaxCapacity" : 100,
-                           "maxActiveApplications" : 1,
-                           "numActiveApplications" : 0,
-                           "absoluteUsedCapacity" : 0,
-                           "userLimit" : 100,
-                           "absoluteCapacity" : 8.000001,
-                           "maxActiveApplicationsPerUser" : 1,
-                           "capacity" : 40,
-                           "type" : "capacitySchedulerLeafQueueInfo",
-                           "maxApplicationsPerUser" : 0
-                        }
-                     ]
-                  },
-                  "absoluteUsedCapacity" : 0
-               },
-               {
-                  "queueName" : "test2",
-                  "absoluteCapacity" : 10,
-                  "usedCapacity" : 0,
-                  "capacity" : 10,
-                  "state" : "RUNNING",
-                  "maxCapacity" : 15.000001,
-                  "numApplications" : 0,
-                  "usedResources" : "memory: 0",
-                  "absoluteMaxCapacity" : 15.000001,
-                  "queues" : {
-                     "queue" : [
-                        {
-                           "numPendingApplications" : 0,
-                           "queueName" : "a3",
-                           "userLimitFactor" : 1,
-                           "maxApplications" : 0,
-                           "usedCapacity" : 0,
-                           "numContainers" : 0,
-                           "state" : "RUNNING",
-                           "maxCapacity" : 100,
-                           "numApplications" : 0,
-                           "usedResources" : "memory: 0",
-                           "absoluteMaxCapacity" : 15.000001,
-                           "maxActiveApplications" : 1,
-                           "numActiveApplications" : 0,
-                           "absoluteUsedCapacity" : 0,
-                           "userLimit" : 100,
-                           "absoluteCapacity" : 9,
-                           "maxActiveApplicationsPerUser" : 1,
-                           "capacity" : 90,
-                           "type" : "capacitySchedulerLeafQueueInfo",
-                           "maxApplicationsPerUser" : 0
-                        },
-                        {
-                           "numPendingApplications" : 0,
-                           "queueName" : "a4",
-                           "userLimitFactor" : 1,
-                           "maxApplications" : 0,
-                           "usedCapacity" : 0,
-                           "numContainers" : 0,
-                           "state" : "RUNNING",
-                           "maxCapacity" : 100,
-                           "numApplications" : 0,
-                           "usedResources" : "memory: 0",
-                           "absoluteMaxCapacity" : 15.000001,
-                           "maxActiveApplications" : 1,
-                           "numActiveApplications" : 0,
-                           "absoluteUsedCapacity" : 0,
-                           "userLimit" : 100,
-                           "absoluteCapacity" : 1.0000001,
-                           "maxActiveApplicationsPerUser" : 1,
-                           "capacity" : 10,
-                           "type" : "capacitySchedulerLeafQueueInfo",
-                           "maxApplicationsPerUser" : 0
-                        }
-                     ]
-                  },
-                  "absoluteUsedCapacity" : 0
-               }
-            ]
-         },
-         "usedCapacity" : 0,
-         "capacity" : 100
-      }
-   }
+    "scheduler": {
+        "schedulerInfo": {
+            "capacity": 100.0, 
+            "maxCapacity": 100.0, 
+            "queueName": "root", 
+            "queues": {
+                "queue": [
+                    {
+                        "absoluteCapacity": 10.5, 
+                        "absoluteMaxCapacity": 50.0, 
+                        "absoluteUsedCapacity": 0.0, 
+                        "capacity": 10.5, 
+                        "maxCapacity": 50.0, 
+                        "numApplications": 0, 
+                        "queueName": "a", 
+                        "queues": {
+                            "queue": [
+                                {
+                                    "absoluteCapacity": 3.15, 
+                                    "absoluteMaxCapacity": 25.0, 
+                                    "absoluteUsedCapacity": 0.0, 
+                                    "capacity": 30.000002, 
+                                    "maxCapacity": 50.0, 
+                                    "numApplications": 0, 
+                                    "queueName": "a1", 
+                                    "queues": {
+                                        "queue": [
+                                            {
+                                                "absoluteCapacity": 2.6775, 
+                                                "absoluteMaxCapacity": 25.0, 
+                                                "absoluteUsedCapacity": 0.0, 
+                                                "capacity": 85.0, 
+                                                "maxActiveApplications": 1, 
+                                                "maxActiveApplicationsPerUser": 1, 
+                                                "maxApplications": 267, 
+                                                "maxApplicationsPerUser": 267, 
+                                                "maxCapacity": 100.0, 
+                                                "numActiveApplications": 0, 
+                                                "numApplications": 0, 
+                                                "numContainers": 0, 
+                                                "numPendingApplications": 0, 
+                                                "queueName": "a1a", 
+                                                "resourcesUsed": {
+                                                    "memory": 0, 
+                                                    "vCores": 0
+                                                }, 
+                                                "state": "RUNNING", 
+                                                "type": "capacitySchedulerLeafQueueInfo", 
+                                                "usedCapacity": 0.0, 
+                                                "usedResources": "<memory:0, vCores:0>", 
+                                                "userLimit": 100, 
+                                                "userLimitFactor": 1.0, 
+                                                "users": null
+                                            }, 
+                                            {
+                                                "absoluteCapacity": 0.47250003, 
+                                                "absoluteMaxCapacity": 25.0, 
+                                                "absoluteUsedCapacity": 0.0, 
+                                                "capacity": 15.000001, 
+                                                "maxActiveApplications": 1, 
+                                                "maxActiveApplicationsPerUser": 1, 
+                                                "maxApplications": 47, 
+                                                "maxApplicationsPerUser": 47, 
+                                                "maxCapacity": 100.0, 
+                                                "numActiveApplications": 0, 
+                                                "numApplications": 0, 
+                                                "numContainers": 0, 
+                                                "numPendingApplications": 0, 
+                                                "queueName": "a1b", 
+                                                "resourcesUsed": {
+                                                    "memory": 0, 
+                                                    "vCores": 0
+                                                }, 
+                                                "state": "RUNNING", 
+                                                "type": "capacitySchedulerLeafQueueInfo", 
+                                                "usedCapacity": 0.0, 
+                                                "usedResources": "<memory:0, vCores:0>", 
+                                                "userLimit": 100, 
+                                                "userLimitFactor": 1.0, 
+                                                "users": null
+                                            }
+                                        ]
+                                    }, 
+                                    "resourcesUsed": {
+                                        "memory": 0, 
+                                        "vCores": 0
+                                    }, 
+                                    "state": "RUNNING", 
+                                    "usedCapacity": 0.0, 
+                                    "usedResources": "<memory:0, vCores:0>"
+                                }, 
+                                {
+                                    "absoluteCapacity": 7.35, 
+                                    "absoluteMaxCapacity": 50.0, 
+                                    "absoluteUsedCapacity": 0.0, 
+                                    "capacity": 70.0, 
+                                    "maxActiveApplications": 1, 
+                                    "maxActiveApplicationsPerUser": 100, 
+                                    "maxApplications": 735, 
+                                    "maxApplicationsPerUser": 73500, 
+                                    "maxCapacity": 100.0, 
+                                    "numActiveApplications": 0, 
+                                    "numApplications": 0, 
+                                    "numContainers": 0, 
+                                    "numPendingApplications": 0, 
+                                    "queueName": "a2", 
+                                    "resourcesUsed": {
+                                        "memory": 0, 
+                                        "vCores": 0
+                                    }, 
+                                    "state": "RUNNING", 
+                                    "type": "capacitySchedulerLeafQueueInfo", 
+                                    "usedCapacity": 0.0, 
+                                    "usedResources": "<memory:0, vCores:0>", 
+                                    "userLimit": 100, 
+                                    "userLimitFactor": 100.0, 
+                                    "users": null
+                                }
+                            ]
+                        }, 
+                        "resourcesUsed": {
+                            "memory": 0, 
+                            "vCores": 0
+                        }, 
+                        "state": "RUNNING", 
+                        "usedCapacity": 0.0, 
+                        "usedResources": "<memory:0, vCores:0>"
+                    }, 
+                    {
+                        "absoluteCapacity": 89.5, 
+                        "absoluteMaxCapacity": 100.0, 
+                        "absoluteUsedCapacity": 0.0, 
+                        "capacity": 89.5, 
+                        "maxCapacity": 100.0, 
+                        "numApplications": 2, 
+                        "queueName": "b", 
+                        "queues": {
+                            "queue": [
+                                {
+                                    "absoluteCapacity": 53.7, 
+                                    "absoluteMaxCapacity": 100.0, 
+                                    "absoluteUsedCapacity": 0.0, 
+                                    "capacity": 60.000004, 
+                                    "maxActiveApplications": 1, 
+                                    "maxActiveApplicationsPerUser": 100, 
+                                    "maxApplications": 5370, 
+                                    "maxApplicationsPerUser": 537000, 
+                                    "maxCapacity": 100.0, 
+                                    "numActiveApplications": 1, 
+                                    "numApplications": 2, 
+                                    "numContainers": 0, 
+                                    "numPendingApplications": 1, 
+                                    "queueName": "b1", 
+                                    "resourcesUsed": {
+                                        "memory": 0, 
+                                        "vCores": 0
+                                    }, 
+                                    "state": "RUNNING", 
+                                    "type": "capacitySchedulerLeafQueueInfo", 
+                                    "usedCapacity": 0.0, 
+                                    "usedResources": "<memory:0, vCores:0>", 
+                                    "userLimit": 100, 
+                                    "userLimitFactor": 100.0, 
+                                    "users": {
+                                        "user": [
+                                            {
+                                                "numActiveApplications": 0, 
+                                                "numPendingApplications": 1, 
+                                                "resourcesUsed": {
+                                                    "memory": 0, 
+                                                    "vCores": 0
+                                                }, 
+                                                "username": "user2"
+                                            }, 
+                                            {
+                                                "numActiveApplications": 1, 
+                                                "numPendingApplications": 0, 
+                                                "resourcesUsed": {
+                                                    "memory": 0, 
+                                                    "vCores": 0
+                                                }, 
+                                                "username": "user1"
+                                            }
+                                        ]
+                                    }
+                                }, 
+                                {
+                                    "absoluteCapacity": 35.3525, 
+                                    "absoluteMaxCapacity": 100.0, 
+                                    "absoluteUsedCapacity": 0.0, 
+                                    "capacity": 39.5, 
+                                    "maxActiveApplications": 1, 
+                                    "maxActiveApplicationsPerUser": 100, 
+                                    "maxApplications": 3535, 
+                                    "maxApplicationsPerUser": 353500, 
+                                    "maxCapacity": 100.0, 
+                                    "numActiveApplications": 0, 
+                                    "numApplications": 0, 
+                                    "numContainers": 0, 
+                                    "numPendingApplications": 0, 
+                                    "queueName": "b2", 
+                                    "resourcesUsed": {
+                                        "memory": 0, 
+                                        "vCores": 0
+                                    }, 
+                                    "state": "RUNNING", 
+                                    "type": "capacitySchedulerLeafQueueInfo", 
+                                    "usedCapacity": 0.0, 
+                                    "usedResources": "<memory:0, vCores:0>", 
+                                    "userLimit": 100, 
+                                    "userLimitFactor": 100.0, 
+                                    "users": null
+                                }, 
+                                {
+                                    "absoluteCapacity": 0.4475, 
+                                    "absoluteMaxCapacity": 100.0, 
+                                    "absoluteUsedCapacity": 0.0, 
+                                    "capacity": 0.5, 
+                                    "maxActiveApplications": 1, 
+                                    "maxActiveApplicationsPerUser": 100, 
+                                    "maxApplications": 44, 
+                                    "maxApplicationsPerUser": 4400, 
+                                    "maxCapacity": 100.0, 
+                                    "numActiveApplications": 0, 
+                                    "numApplications": 0, 
+                                    "numContainers": 0, 
+                                    "numPendingApplications": 0, 
+                                    "queueName": "b3", 
+                                    "resourcesUsed": {
+                                        "memory": 0, 
+                                        "vCores": 0
+                                    }, 
+                                    "state": "RUNNING", 
+                                    "type": "capacitySchedulerLeafQueueInfo", 
+                                    "usedCapacity": 0.0, 
+                                    "usedResources": "<memory:0, vCores:0>", 
+                                    "userLimit": 100, 
+                                    "userLimitFactor": 100.0, 
+                                    "users": null
+                                }
+                            ]
+                        }, 
+                        "resourcesUsed": {
+                            "memory": 0, 
+                            "vCores": 0
+                        }, 
+                        "state": "RUNNING", 
+                        "usedCapacity": 0.0, 
+                        "usedResources": "<memory:0, vCores:0>"
+                    }
+                ]
+            }, 
+            "type": "capacityScheduler", 
+            "usedCapacity": 0.0
+        }
+    }
 }
 +---+
 
@@ -653,48 +744,27 @@ ResourceManager REST API's.
     <maxCapacity>100.0</maxCapacity>
     <queueName>root</queueName>
     <queues>
-      <queue xsi:type="capacitySchedulerLeafQueueInfo">
-        <capacity>70.0</capacity>
-        <usedCapacity>0.0</usedCapacity>
-        <maxCapacity>90.0</maxCapacity>
-        <absoluteCapacity>70.0</absoluteCapacity>
-        <absoluteMaxCapacity>90.0</absoluteMaxCapacity>
-        <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
-        <numApplications>0</numApplications>
-        <usedResources>memory: 0</usedResources>
-        <queueName>default</queueName>
-        <state>RUNNING</state>
-        <numActiveApplications>0</numActiveApplications>
-        <numPendingApplications>0</numPendingApplications>
-        <numContainers>0</numContainers>
-        <maxApplications>1</maxApplications>
-        <maxApplicationsPerUser>1</maxApplicationsPerUser>
-        <maxActiveApplications>1</maxActiveApplications>
-        <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
-        <userLimit>100</userLimit>
-        <userLimitFactor>1.0</userLimitFactor>
-      </queue>
       <queue>
-        <capacity>20.0</capacity>
+        <capacity>10.5</capacity>
         <usedCapacity>0.0</usedCapacity>
-        <maxCapacity>100.0</maxCapacity>
-        <absoluteCapacity>20.0</absoluteCapacity>
-        <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
+        <maxCapacity>50.0</maxCapacity>
+        <absoluteCapacity>10.5</absoluteCapacity>
+        <absoluteMaxCapacity>50.0</absoluteMaxCapacity>
         <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
         <numApplications>0</numApplications>
-        <usedResources>memory: 0</usedResources>
-        <queueName>test</queueName>
+        <usedResources>&lt;memory:0, vCores:0&gt;</usedResources>
+        <queueName>a</queueName>
         <state>RUNNING</state>
         <queues>
           <queue>
-            <capacity>60.000004</capacity>
+            <capacity>30.000002</capacity>
             <usedCapacity>0.0</usedCapacity>
-            <maxCapacity>100.0</maxCapacity>
-            <absoluteCapacity>12.0</absoluteCapacity>
-            <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
+            <maxCapacity>50.0</maxCapacity>
+            <absoluteCapacity>3.15</absoluteCapacity>
+            <absoluteMaxCapacity>25.0</absoluteMaxCapacity>
             <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
             <numApplications>0</numApplications>
-            <usedResources>memory: 0</usedResources>
+            <usedResources>&lt;memory:0, vCores:0&gt;</usedResources>
             <queueName>a1</queueName>
             <state>RUNNING</state>
             <queues>
@@ -702,124 +772,206 @@ ResourceManager REST API's.
                 <capacity>85.0</capacity>
                 <usedCapacity>0.0</usedCapacity>
                 <maxCapacity>100.0</maxCapacity>
-                <absoluteCapacity>10.200001</absoluteCapacity>
-                <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
+                <absoluteCapacity>2.6775</absoluteCapacity>
+                <absoluteMaxCapacity>25.0</absoluteMaxCapacity>
                 <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
                 <numApplications>0</numApplications>
-                <usedResources>memory: 0</usedResources>
-                <queueName>a11</queueName>
+                <usedResources>&lt;memory:0, vCores:0&gt;</usedResources>
+                <queueName>a1a</queueName>
                 <state>RUNNING</state>
+                <resourcesUsed>
+                  <memory>0</memory>
+                  <vCores>0</vCores>
+                </resourcesUsed>
                 <numActiveApplications>0</numActiveApplications>
                 <numPendingApplications>0</numPendingApplications>
                 <numContainers>0</numContainers>
-                <maxApplications>0</maxApplications>
-                <maxApplicationsPerUser>0</maxApplicationsPerUser>
+                <maxApplications>267</maxApplications>
+                <maxApplicationsPerUser>267</maxApplicationsPerUser>
                 <maxActiveApplications>1</maxActiveApplications>
                 <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
                 <userLimit>100</userLimit>
+                <users/>
                 <userLimitFactor>1.0</userLimitFactor>
               </queue>
               <queue xsi:type="capacitySchedulerLeafQueueInfo">
                 <capacity>15.000001</capacity>
                 <usedCapacity>0.0</usedCapacity>
                 <maxCapacity>100.0</maxCapacity>
-                <absoluteCapacity>1.8000001</absoluteCapacity>
-                <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
+                <absoluteCapacity>0.47250003</absoluteCapacity>
+                <absoluteMaxCapacity>25.0</absoluteMaxCapacity>
                 <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
                 <numApplications>0</numApplications>
-                <usedResources>memory: 0</usedResources>
-                <queueName>a12</queueName>
+                <usedResources>&lt;memory:0, vCores:0&gt;</usedResources>
+                <queueName>a1b</queueName>
                 <state>RUNNING</state>
+                <resourcesUsed>
+                  <memory>0</memory>
+                  <vCores>0</vCores>
+                </resourcesUsed>
                 <numActiveApplications>0</numActiveApplications>
                 <numPendingApplications>0</numPendingApplications>
                 <numContainers>0</numContainers>
-                <maxApplications>0</maxApplications>
-                <maxApplicationsPerUser>0</maxApplicationsPerUser>
+                <maxApplications>47</maxApplications>
+                <maxApplicationsPerUser>47</maxApplicationsPerUser>
                 <maxActiveApplications>1</maxActiveApplications>
                 <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
                 <userLimit>100</userLimit>
+                <users/>
                 <userLimitFactor>1.0</userLimitFactor>
               </queue>
             </queues>
+            <resourcesUsed>
+              <memory>0</memory>
+              <vCores>0</vCores>
+            </resourcesUsed>
           </queue>
           <queue xsi:type="capacitySchedulerLeafQueueInfo">
-            <capacity>40.0</capacity>
+            <capacity>70.0</capacity>
             <usedCapacity>0.0</usedCapacity>
             <maxCapacity>100.0</maxCapacity>
-            <absoluteCapacity>8.000001</absoluteCapacity>
-            <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
+            <absoluteCapacity>7.35</absoluteCapacity>
+            <absoluteMaxCapacity>50.0</absoluteMaxCapacity>
             <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
             <numApplications>0</numApplications>
-            <usedResources>memory: 0</usedResources>
+            <usedResources>&lt;memory:0, vCores:0&gt;</usedResources>
             <queueName>a2</queueName>
             <state>RUNNING</state>
+            <resourcesUsed>
+              <memory>0</memory>
+              <vCores>0</vCores>
+            </resourcesUsed>
             <numActiveApplications>0</numActiveApplications>
             <numPendingApplications>0</numPendingApplications>
             <numContainers>0</numContainers>
-            <maxApplications>0</maxApplications>
-            <maxApplicationsPerUser>0</maxApplicationsPerUser>
+            <maxApplications>735</maxApplications>
+            <maxApplicationsPerUser>73500</maxApplicationsPerUser>
             <maxActiveApplications>1</maxActiveApplications>
-            <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
+            <maxActiveApplicationsPerUser>100</maxActiveApplicationsPerUser>
             <userLimit>100</userLimit>
-            <userLimitFactor>1.0</userLimitFactor>
+            <users/>
+            <userLimitFactor>100.0</userLimitFactor>
           </queue>
         </queues>
+        <resourcesUsed>
+          <memory>0</memory>
+          <vCores>0</vCores>
+        </resourcesUsed>
       </queue>
       <queue>
-        <capacity>10.0</capacity>
+        <capacity>89.5</capacity>
         <usedCapacity>0.0</usedCapacity>
-        <maxCapacity>15.000001</maxCapacity>
-        <absoluteCapacity>10.0</absoluteCapacity>
-        <absoluteMaxCapacity>15.000001</absoluteMaxCapacity>
+        <maxCapacity>100.0</maxCapacity>
+        <absoluteCapacity>89.5</absoluteCapacity>
+        <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
         <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
-        <numApplications>0</numApplications>
-        <usedResources>memory: 0</usedResources>
-        <queueName>test2</queueName>
+        <numApplications>2</numApplications>
+        <usedResources>&lt;memory:0, vCores:0&gt;</usedResources>
+        <queueName>b</queueName>
         <state>RUNNING</state>
         <queues>
           <queue xsi:type="capacitySchedulerLeafQueueInfo">
-            <capacity>90.0</capacity>
+            <capacity>60.000004</capacity>
             <usedCapacity>0.0</usedCapacity>
             <maxCapacity>100.0</maxCapacity>
-            <absoluteCapacity>9.0</absoluteCapacity>
-            <absoluteMaxCapacity>15.000001</absoluteMaxCapacity>
+            <absoluteCapacity>53.7</absoluteCapacity>
+            <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
+            <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
+            <numApplications>2</numApplications>
+            <usedResources>&lt;memory:0, vCores:0&gt;</usedResources>
+            <queueName>b1</queueName>
+            <state>RUNNING</state>
+            <resourcesUsed>
+              <memory>0</memory>
+              <vCores>0</vCores>
+            </resourcesUsed>
+            <numActiveApplications>1</numActiveApplications>
+            <numPendingApplications>1</numPendingApplications>
+            <numContainers>0</numContainers>
+            <maxApplications>5370</maxApplications>
+            <maxApplicationsPerUser>537000</maxApplicationsPerUser>
+            <maxActiveApplications>1</maxActiveApplications>
+            <maxActiveApplicationsPerUser>100</maxActiveApplicationsPerUser>
+            <userLimit>100</userLimit>
+            <users>
+              <user>
+                <username>user2</username>
+                <resourcesUsed>
+                  <memory>0</memory>
+                  <vCores>0</vCores>
+                </resourcesUsed>
+                <numPendingApplications>1</numPendingApplications>
+                <numActiveApplications>0</numActiveApplications>
+              </user>
+              <user>
+                <username>user1</username>
+                <resourcesUsed>
+                  <memory>0</memory>
+                  <vCores>0</vCores>
+                </resourcesUsed>
+                <numPendingApplications>0</numPendingApplications>
+                <numActiveApplications>1</numActiveApplications>
+              </user>
+            </users>
+            <userLimitFactor>100.0</userLimitFactor>
+          </queue>
+          <queue xsi:type="capacitySchedulerLeafQueueInfo">
+            <capacity>39.5</capacity>
+            <usedCapacity>0.0</usedCapacity>
+            <maxCapacity>100.0</maxCapacity>
+            <absoluteCapacity>35.3525</absoluteCapacity>
+            <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
             <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
             <numApplications>0</numApplications>
-            <usedResources>memory: 0</usedResources>
-            <queueName>a3</queueName>
+            <usedResources>&lt;memory:0, vCores:0&gt;</usedResources>
+            <queueName>b2</queueName>
             <state>RUNNING</state>
+            <resourcesUsed>
+              <memory>0</memory>
+              <vCores>0</vCores>
+            </resourcesUsed>
             <numActiveApplications>0</numActiveApplications>
             <numPendingApplications>0</numPendingApplications>
             <numContainers>0</numContainers>
-            <maxApplications>0</maxApplications>
-            <maxApplicationsPerUser>0</maxApplicationsPerUser>
+            <maxApplications>3535</maxApplications>
+            <maxApplicationsPerUser>353500</maxApplicationsPerUser>
             <maxActiveApplications>1</maxActiveApplications>
-            <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
+            <maxActiveApplicationsPerUser>100</maxActiveApplicationsPerUser>
             <userLimit>100</userLimit>
-            <userLimitFactor>1.0</userLimitFactor>
+            <users/>
+            <userLimitFactor>100.0</userLimitFactor>
           </queue>
           <queue xsi:type="capacitySchedulerLeafQueueInfo">
-            <capacity>10.0</capacity>
+            <capacity>0.5</capacity>
             <usedCapacity>0.0</usedCapacity>
             <maxCapacity>100.0</maxCapacity>
-            <absoluteCapacity>1.0000001</absoluteCapacity>
-            <absoluteMaxCapacity>15.000001</absoluteMaxCapacity>
+            <absoluteCapacity>0.4475</absoluteCapacity>
+            <absoluteMaxCapacity>100.0</absoluteMaxCapacity>
             <absoluteUsedCapacity>0.0</absoluteUsedCapacity>
             <numApplications>0</numApplications>
-            <usedResources>memory: 0</usedResources>
-            <queueName>a4</queueName>
+            <usedResources>&lt;memory:0, vCores:0&gt;</usedResources>
+            <queueName>b3</queueName>
             <state>RUNNING</state>
+            <resourcesUsed>
+              <memory>0</memory>
+              <vCores>0</vCores>
+            </resourcesUsed>
             <numActiveApplications>0</numActiveApplications>
             <numPendingApplications>0</numPendingApplications>
             <numContainers>0</numContainers>
-            <maxApplications>0</maxApplications>
-            <maxApplicationsPerUser>0</maxApplicationsPerUser>
+            <maxApplications>44</maxApplications>
+            <maxApplicationsPerUser>4400</maxApplicationsPerUser>
             <maxActiveApplications>1</maxActiveApplications>
-            <maxActiveApplicationsPerUser>1</maxActiveApplicationsPerUser>
+            <maxActiveApplicationsPerUser>100</maxActiveApplicationsPerUser>
             <userLimit>100</userLimit>
-            <userLimitFactor>1.0</userLimitFactor>
+            <users/>
+            <userLimitFactor>100.0</userLimitFactor>
           </queue>
         </queues>
+        <resourcesUsed>
+          <memory>0</memory>
+          <vCores>0</vCores>
+        </resourcesUsed>
       </queue>
     </queues>
   </schedulerInfo>
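
For reference, the queue hierarchy above is the XML form of the ResourceManager's Capacity Scheduler REST response (/ws/v1/cluster/scheduler). Below is a minimal sketch of reading it, assuming an RM on localhost with the default web port 8088; the SchedulerQueueDump class name and the simple DOM traversal are illustrative only, while the element names (queue, queueName, usedResources) mirror the sample response shown above.

// Minimal sketch: fetch the Capacity Scheduler queue hierarchy as XML from the
// ResourceManager REST API and print each queue's name and used resources.
// Assumes a ResourceManager on localhost:8088 (the default web port).
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.NodeList;

public class SchedulerQueueDump {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8088/ws/v1/cluster/scheduler");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // Ask explicitly for XML; the endpoint can also serve JSON.
    conn.setRequestProperty("Accept", "application/xml");
    try (InputStream in = conn.getInputStream()) {
      Document doc = DocumentBuilderFactory.newInstance()
          .newDocumentBuilder().parse(in);
      NodeList queues = doc.getElementsByTagName("queue");
      for (int i = 0; i < queues.getLength(); i++) {
        Element q = (Element) queues.item(i);
        // Relies on a queue's own <queueName> and <usedResources> appearing
        // before its nested <queues> element, as in the sample above.
        String name = firstText(q, "queueName");
        String used = firstText(q, "usedResources");
        if (name != null) {
          System.out.println(name + " -> " + used);
        }
      }
    } finally {
      conn.disconnect();
    }
  }

  // Returns the text of the first descendant element with the given tag,
  // or null if none is present.
  private static String firstText(Element parent, String tag) {
    NodeList nodes = parent.getElementsByTagName(tag);
    return nodes.getLength() == 0 ? null : nodes.item(0).getTextContent();
  }
}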