Merge r1523804 through r1523877 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1523878 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 12 years ago
parent
commit
0398943572
20 changed files with 662 additions and 165 deletions
  1. +0 -2     hadoop-common-project/hadoop-common/CHANGES.txt
  2. +4 -0     hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  3. +7 -6     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  4. +7 -6     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
  5. +53 -46   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  6. +19 -17   hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
  7. +2 -3     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  8. +8 -7     hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  9. +14 -12   hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
  10. +5 -6    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
  11. +2 -2    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
  12. +0 -2    hadoop-project/pom.xml
  13. +5 -0    hadoop-yarn-project/CHANGES.txt
  14. +5 -2    hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
  15. +4 -1    hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
  16. +128 -44 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java
  17. +44 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationStatisticsInfo.java
  18. +56 -0   hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/StatisticsItemInfo.java
  19. +176 -9  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
  20. +123 -0  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm

+ 0 - 2
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -471,8 +471,6 @@ Release 2.1.1-beta - UNRELEASED
     HADOOP-9350. Hadoop not building against Java7 on OSX
     (Robert Kanter via stevel)
 
-    HADOOP-9935. set junit dependency to test scope. (André Kelpe via cnauroth)
-
     HADOOP-9961. versions of a few transitive dependencies diverged between hadoop 
     subprojects. (rvs via tucu)
 

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -279,6 +279,10 @@ Release 2.3.0 - UNRELEASED
     methods; replace HashMap with Map in parameter declarations and cleanup
     some related code.  (szetszwo)
 
+    HDFS-5207. In BlockPlacementPolicy.chooseTarget(..), change the writer
+    and the excludedNodes parameter types respectively to Node and Set.
+    (Junping Du via szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

+ 7 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -26,6 +26,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -1260,13 +1261,13 @@ public class BlockManager {
       namesystem.writeUnlock();
     }
 
-    final Map<Node, Node> excludedNodes = new HashMap<Node, Node>();
+    final Set<Node> excludedNodes = new HashSet<Node>();
     for(ReplicationWork rw : work){
       // Exclude all of the containing nodes from being targets.
       // This list includes decommissioning or corrupt nodes.
       excludedNodes.clear();
       for (DatanodeDescriptor dn : rw.containingNodes) {
-        excludedNodes.put(dn, dn);
+        excludedNodes.add(dn);
       }
 
       // choose replication targets: NOT HOLDING THE GLOBAL LOCK
@@ -1379,12 +1380,12 @@ public class BlockManager {
    * 
    * @throws IOException
    *           if the number of targets < minimum replication.
-   * @see BlockPlacementPolicy#chooseTarget(String, int, DatanodeDescriptor,
-   *      List, boolean, HashMap, long)
+   * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
+   *      List, boolean, Set, long)
    */
   public DatanodeDescriptor[] chooseTarget(final String src,
       final int numOfReplicas, final DatanodeDescriptor client,
-      final HashMap<Node, Node> excludedNodes,
+      final Set<Node> excludedNodes,
       final long blocksize, List<String> favoredNodes) throws IOException {
     List<DatanodeDescriptor> favoredDatanodeDescriptors = 
         getDatanodeDescriptors(favoredNodes);
@@ -3248,7 +3249,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     }
     
     private void chooseTargets(BlockPlacementPolicy blockplacement,
-        Map<Node, Node> excludedNodes) {
+        Set<Node> excludedNodes) {
       targets = blockplacement.chooseTarget(bc.getName(),
           additionalReplRequired, srcNode, liveReplicaNodes, false,
           excludedNodes, block.getNumBytes());

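The recurring change across the HDFS files in this merge (HDFS-5207) replaces a HashMap<Node, Node> that was only ever used as a makeshift set with a real Set<Node>. A minimal sketch of the equivalence, using plain JDK types and placeholder string node names rather than the real Node class:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class SetIdiomSketch {
  public static void main(String[] args) {
    // Old style: a map whose key and value are always the same object;
    // put(..) returns the previous mapping, so null means "newly excluded".
    Map<String, String> excludedAsMap = new HashMap<String, String>();
    boolean newlyExcludedOld = excludedAsMap.put("dn1", "dn1") == null;

    // New style: Set.add(..) returns true exactly when the element was
    // absent, carrying the same information with half the bookkeeping.
    Set<String> excluded = new HashSet<String>();
    boolean newlyExcludedNew = excluded.add("dn1");

    System.out.println(newlyExcludedOld + " " + newlyExcludedNew); // true true
  }
}
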
+ 7 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java

@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -68,22 +69,22 @@ public abstract class BlockPlacementPolicy {
    */
   public abstract DatanodeDescriptor[] chooseTarget(String srcPath,
                                              int numOfReplicas,
-                                             DatanodeDescriptor writer,
+                                             Node writer,
                                              List<DatanodeDescriptor> chosenNodes,
                                              boolean returnChosenNodes,
-                                             Map<Node, Node> excludedNodes,
+                                             Set<Node> excludedNodes,
                                              long blocksize);
   
   /**
-   * Same as {@link #chooseTarget(String, int, DatanodeDescriptor, List, boolean, 
-   * HashMap, long)} with added parameter {@code favoredDatanodes}
+   * Same as {@link #chooseTarget(String, int, Node, List, boolean, 
+   * Set, long)} with added parameter {@code favoredDatanodes}
    * @param favoredNodes datanodes that should be favored as targets. This
    *          is only a hint and due to cluster state, namenode may not be 
    *          able to place the blocks on these datanodes.
    */
   DatanodeDescriptor[] chooseTarget(String src,
-      int numOfReplicas, DatanodeDescriptor writer,
-      Map<Node, Node> excludedNodes,
+      int numOfReplicas, Node writer,
+      Set<Node> excludedNodes,
       long blocksize, List<DatanodeDescriptor> favoredNodes) {
     // This class does not provide the functionality of placing
     // a block in favored datanodes. The implementations of this class

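Beyond the container type, the writer parameter widens from DatanodeDescriptor to Node: a writer running outside the cluster still carries topology information, but only a writer that actually is a datanode may be picked as the local target. A sketch of that shape with hypothetical stand-in types (not the real org.apache.hadoop.net.Node hierarchy):

interface Node {
  String getNetworkLocation();
}

class Datanode implements Node {
  public String getNetworkLocation() { return "/rack1"; }
}

class ClientMachine implements Node {
  public String getNetworkLocation() { return "/rack2"; }
}

public class WriterAsNodeSketch {
  // Mirrors the widened signature: accept any Node, downcast only after
  // an instanceof check, as chooseLocalNode(..) now does.
  static String chooseLocalNode(Node writer) {
    if (writer instanceof Datanode) {
      return "try local datanode on " + writer.getNetworkLocation();
    }
    return "fall back to rack-local or random choice";
  }

  public static void main(String[] args) {
    System.out.println(chooseLocalNode(new Datanode()));
    System.out.println(chooseLocalNode(new ClientMachine()));
  }
}
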
+ 53 - 46
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -21,9 +21,8 @@ import static org.apache.hadoop.util.Time.now;
 
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -106,10 +105,10 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   @Override
   public DatanodeDescriptor[] chooseTarget(String srcPath,
                                     int numOfReplicas,
-                                    DatanodeDescriptor writer,
+                                    Node writer,
                                     List<DatanodeDescriptor> chosenNodes,
                                     boolean returnChosenNodes,
-                                    Map<Node, Node> excludedNodes,
+                                    Set<Node> excludedNodes,
                                     long blocksize) {
     return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes,
         excludedNodes, blocksize);
@@ -118,8 +117,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   @Override
   DatanodeDescriptor[] chooseTarget(String src,
       int numOfReplicas,
-      DatanodeDescriptor writer,
-      Map<Node, Node> excludedNodes,
+      Node writer,
+      Set<Node> excludedNodes,
       long blocksize,
       List<DatanodeDescriptor> favoredNodes) {
     try {
@@ -130,8 +129,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
             excludedNodes, blocksize);
       }
 
-      Map<Node, Node> favoriteAndExcludedNodes = excludedNodes == null ?
-          new HashMap<Node, Node>() : new HashMap<Node, Node>(excludedNodes);
+      Set<Node> favoriteAndExcludedNodes = excludedNodes == null ?
+          new HashSet<Node>() : new HashSet<Node>(excludedNodes);
 
       // Choose favored nodes
       List<DatanodeDescriptor> results = new ArrayList<DatanodeDescriptor>();
@@ -150,10 +149,10 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
               + " with favored node " + favoredNode); 
           continue;
         }
-        favoriteAndExcludedNodes.put(target, target);
+        favoriteAndExcludedNodes.add(target);
       }
 
-      if (results.size() < numOfReplicas) {        
+      if (results.size() < numOfReplicas) {
         // Not enough favored nodes, choose other nodes.
         numOfReplicas -= results.size();
         DatanodeDescriptor[] remainingTargets = 
@@ -175,17 +174,17 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 
   /** This is the implementation. */
   private DatanodeDescriptor[] chooseTarget(int numOfReplicas,
-                                    DatanodeDescriptor writer,
+                                    Node writer,
                                     List<DatanodeDescriptor> chosenNodes,
                                     boolean returnChosenNodes,
-                                    Map<Node, Node> excludedNodes,
+                                    Set<Node> excludedNodes,
                                     long blocksize) {
     if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
       return DatanodeDescriptor.EMPTY_ARRAY;
     }
       
     if (excludedNodes == null) {
-      excludedNodes = new HashMap<Node, Node>();
+      excludedNodes = new HashSet<Node>();
     }
      
     int[] result = getMaxNodesPerRack(chosenNodes, numOfReplicas);
@@ -200,12 +199,12 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     }
       
     if (!clusterMap.contains(writer)) {
-      writer=null;
+      writer = null;
     }
       
     boolean avoidStaleNodes = (stats != null
         && stats.isAvoidingStaleDataNodesForWrite());
-    DatanodeDescriptor localNode = chooseTarget(numOfReplicas, writer,
+    Node localNode = chooseTarget(numOfReplicas, writer,
         excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes);
     if (!returnChosenNodes) {  
       results.removeAll(chosenNodes);
@@ -228,10 +227,20 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     return new int[] {numOfReplicas, maxNodesPerRack};
   }
     
-  /* choose <i>numOfReplicas</i> from all data nodes */
-  private DatanodeDescriptor chooseTarget(int numOfReplicas,
-                                          DatanodeDescriptor writer,
-                                          Map<Node, Node> excludedNodes,
+  /**
+   * choose <i>numOfReplicas</i> from all data nodes
+   * @param numOfReplicas additional number of replicas wanted
+   * @param writer the writer's machine, could be a non-DatanodeDescriptor node
+   * @param excludedNodes datanodes that should not be considered as targets
+   * @param blocksize size of the data to be written
+   * @param maxNodesPerRack max nodes allowed per rack
+   * @param results the target nodes already chosen
+   * @param avoidStaleNodes avoid stale nodes in replica choosing
+   * @return local node of writer (not chosen node)
+   */
+  private Node chooseTarget(int numOfReplicas,
+                                          Node writer,
+                                          Set<Node> excludedNodes,
                                           long blocksize,
                                           int maxNodesPerRack,
                                           List<DatanodeDescriptor> results,
@@ -243,13 +252,13 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       
     int numOfResults = results.size();
     boolean newBlock = (numOfResults==0);
-    if (writer == null && !newBlock) {
+    if ((writer == null || !(writer instanceof DatanodeDescriptor)) && !newBlock) {
       writer = results.get(0);
     }
 
     // Keep a copy of original excludedNodes
-    final Map<Node, Node> oldExcludedNodes = avoidStaleNodes ? 
-        new HashMap<Node, Node>(excludedNodes) : null;
+    final Set<Node> oldExcludedNodes = avoidStaleNodes ? 
+        new HashSet<Node>(excludedNodes) : null;
     try {
       if (numOfResults == 0) {
         writer = chooseLocalNode(writer, excludedNodes, blocksize,
@@ -296,7 +305,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         // We need to additionally exclude the nodes that were added to the 
         // result list in the successful calls to choose*() above.
         for (Node node : results) {
-          oldExcludedNodes.put(node, node);
+          oldExcludedNodes.add(node);
         }
         // Set numOfReplicas, since it can get out of sync with the result list
         // if the NotEnoughReplicasException was thrown in chooseRandom().
@@ -314,8 +323,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    * choose a node on the same rack
    * @return the chosen node
    */
-  protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine,
-                                             Map<Node, Node> excludedNodes,
+  protected DatanodeDescriptor chooseLocalNode(Node localMachine,
+                                             Set<Node> excludedNodes,
                                              long blocksize,
                                              int maxNodesPerRack,
                                              List<DatanodeDescriptor> results,
@@ -325,13 +334,13 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     if (localMachine == null)
       return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
           maxNodesPerRack, results, avoidStaleNodes);
-    if (preferLocalNode) {
+    if (preferLocalNode && localMachine instanceof DatanodeDescriptor) {
+      DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine;
       // otherwise try local machine first
-      Node oldNode = excludedNodes.put(localMachine, localMachine);
-      if (oldNode == null) { // was not in the excluded list
-        if (addIfIsGoodTarget(localMachine, excludedNodes, blocksize,
+      if (excludedNodes.add(localMachine)) { // was not in the excluded list
+        if (addIfIsGoodTarget(localDatanode, excludedNodes, blocksize,
             maxNodesPerRack, false, results, avoidStaleNodes) >= 0) {
-          return localMachine;
+          return localDatanode;
         }
       } 
     }      
@@ -347,9 +356,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    * @return number of new excluded nodes
    */
   protected int addToExcludedNodes(DatanodeDescriptor localMachine,
-      Map<Node, Node> excludedNodes) {
-    Node node = excludedNodes.put(localMachine, localMachine);
-    return node == null?1:0;
+      Set<Node> excludedNodes) {
+    return excludedNodes.add(localMachine) ? 1 : 0;
   }
 
   /**
@@ -360,8 +368,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    * in the cluster.
    * @return the chosen node
    */
-  protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine,
-                                             Map<Node, Node> excludedNodes,
+  protected DatanodeDescriptor chooseLocalRack(Node localMachine,
+                                             Set<Node> excludedNodes,
                                              long blocksize,
                                              int maxNodesPerRack,
                                              List<DatanodeDescriptor> results,
@@ -412,7 +420,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     
   protected void chooseRemoteRack(int numOfReplicas,
                                 DatanodeDescriptor localMachine,
-                                Map<Node, Node> excludedNodes,
+                                Set<Node> excludedNodes,
                                 long blocksize,
                                 int maxReplicasPerRack,
                                 List<DatanodeDescriptor> results,
@@ -436,7 +444,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    * @return the chosen node, if there is any.
    */
   protected DatanodeDescriptor chooseRandom(String scope,
-      Map<Node, Node> excludedNodes,
+      Set<Node> excludedNodes,
       long blocksize,
       int maxNodesPerRack,
       List<DatanodeDescriptor> results,
@@ -452,7 +460,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    */
   protected DatanodeDescriptor chooseRandom(int numOfReplicas,
                             String scope,
-                            Map<Node, Node> excludedNodes,
+                            Set<Node> excludedNodes,
                             long blocksize,
                             int maxNodesPerRack,
                             List<DatanodeDescriptor> results,
@@ -460,7 +468,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                                 throws NotEnoughReplicasException {
       
     int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
-        scope, excludedNodes.keySet());
+        scope, excludedNodes);
     StringBuilder builder = null;
     if (LOG.isDebugEnabled()) {
       builder = debugLoggingBuilder.get();
@@ -472,8 +480,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     while(numOfReplicas > 0 && numOfAvailableNodes > 0) {
       DatanodeDescriptor chosenNode = 
           (DatanodeDescriptor)clusterMap.chooseRandom(scope);
-      Node oldNode = excludedNodes.put(chosenNode, chosenNode);
-      if (oldNode == null) {
+      if (excludedNodes.add(chosenNode)) { //was not in the excluded list
         numOfAvailableNodes--;
 
         int newExcludedNodes = addIfIsGoodTarget(chosenNode, excludedNodes,
@@ -506,16 +513,16 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 
   /**
    * If the given node is a good target, add it to the result list and
-   * update the excluded node map.
+   * update the set of excluded nodes.
    * @return -1 if the given is not a good target;
-   *         otherwise, return the number of excluded nodes added to the map.
+   *         otherwise, return the number of nodes added to excludedNodes set.
    */
   int addIfIsGoodTarget(DatanodeDescriptor node,
-      Map<Node, Node> excludedNodes,
+      Set<Node> excludedNodes,
       long blockSize,
       int maxNodesPerRack,
       boolean considerLoad,
-      List<DatanodeDescriptor> results,                           
+      List<DatanodeDescriptor> results,
       boolean avoidStaleNodes) {
     if (isGoodTarget(node, blockSize, maxNodesPerRack, considerLoad,
         results, avoidStaleNodes)) {
@@ -614,7 +621,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    * starts from the writer and traverses all <i>nodes</i>
    * This is basically a traveling salesman problem.
    */
-  private DatanodeDescriptor[] getPipeline(DatanodeDescriptor writer,
+  private DatanodeDescriptor[] getPipeline(Node writer,
                                            DatanodeDescriptor[] nodes) {
     if (nodes.length==0) return nodes;
       

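In chooseRandom(..) the old put-then-check-null pair collapses into a single add(..) call that serves as both membership test and insertion. A deterministic sketch of the loop shape (a shuffled list stands in for clusterMap.chooseRandom(scope), and the isGoodTarget(..) check is elided):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class ChooseRandomSketch {
  public static void main(String[] args) {
    List<String> cluster =
        new ArrayList<String>(Arrays.asList("dn0", "dn1", "dn2", "dn3"));
    Collections.shuffle(cluster); // stand-in for random choice from a scope
    Set<String> excluded = new HashSet<String>(Collections.singleton("dn1"));
    List<String> results = new ArrayList<String>();

    int numOfReplicas = 2;
    for (String chosen : cluster) {
      if (numOfReplicas == 0) {
        break;
      }
      if (excluded.add(chosen)) { // was not in the excluded list
        results.add(chosen);      // real code also runs addIfIsGoodTarget(..)
        numOfReplicas--;
      }
    }
    System.out.println(results);  // two targets, never dn1
  }
}
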
+ 19 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java

@@ -22,6 +22,7 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -63,8 +64,8 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
    * @return the chosen node
    */
   @Override
-  protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine,
-      Map<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
+  protected DatanodeDescriptor chooseLocalNode(Node localMachine,
+      Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
       List<DatanodeDescriptor> results, boolean avoidStaleNodes)
         throws NotEnoughReplicasException {
     // if no local machine, randomly choose one node
@@ -72,14 +73,16 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
       return chooseRandom(NodeBase.ROOT, excludedNodes, 
           blocksize, maxNodesPerRack, results, avoidStaleNodes);
 
-    // otherwise try local machine first
-    Node oldNode = excludedNodes.put(localMachine, localMachine);
-    if (oldNode == null) { // was not in the excluded list
-      if (addIfIsGoodTarget(localMachine, excludedNodes, blocksize,
-          maxNodesPerRack, false, results, avoidStaleNodes) >= 0) {
-        return localMachine;
+    if (localMachine instanceof DatanodeDescriptor) {
+      DatanodeDescriptor localDataNode = (DatanodeDescriptor)localMachine;
+      // otherwise try local machine first
+      if (excludedNodes.add(localMachine)) { // was not in the excluded list
+        if (addIfIsGoodTarget(localDataNode, excludedNodes, blocksize,
+            maxNodesPerRack, false, results, avoidStaleNodes) >= 0) {
+          return localDataNode;
+        }
       }
-    } 
+    }
 
     // try a node on local node group
     DatanodeDescriptor chosenNode = chooseLocalNodeGroup(
@@ -95,8 +98,8 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
 
   
   @Override
-  protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine,
-      Map<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
+  protected DatanodeDescriptor chooseLocalRack(Node localMachine,
+      Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
       List<DatanodeDescriptor> results, boolean avoidStaleNodes)
       throws NotEnoughReplicasException {
     // no local machine, so choose a random machine
@@ -142,7 +145,7 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
 
   @Override
   protected void chooseRemoteRack(int numOfReplicas,
-      DatanodeDescriptor localMachine, Map<Node, Node> excludedNodes,
+      DatanodeDescriptor localMachine, Set<Node> excludedNodes,
       long blocksize, int maxReplicasPerRack, List<DatanodeDescriptor> results,
       boolean avoidStaleNodes) throws NotEnoughReplicasException {
     int oldNumOfReplicas = results.size();
@@ -168,8 +171,8 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
    * @return the chosen node
    */
   private DatanodeDescriptor chooseLocalNodeGroup(
-      NetworkTopologyWithNodeGroup clusterMap, DatanodeDescriptor localMachine,
-      Map<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
+      NetworkTopologyWithNodeGroup clusterMap, Node localMachine,
+      Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
       List<DatanodeDescriptor> results, boolean avoidStaleNodes)
       throws NotEnoughReplicasException {
     // no local machine, so choose a random machine
@@ -225,13 +228,12 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
    */
   @Override
   protected int addToExcludedNodes(DatanodeDescriptor chosenNode,
-      Map<Node, Node> excludedNodes) {
+      Set<Node> excludedNodes) {
     int countOfExcludedNodes = 0;
     String nodeGroupScope = chosenNode.getNetworkLocation();
     List<Node> leafNodes = clusterMap.getLeaves(nodeGroupScope);
     for (Node leafNode : leafNodes) {
-      Node node = excludedNodes.put(leafNode, leafNode);
-      if (node == null) {
+      if (excludedNodes.add(leafNode)) {
        // not an existing node in excludedNodes
         countOfExcludedNodes++;
       }

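The node-group override of addToExcludedNodes(..) excludes every leaf in the chosen node's group, counting only the leaves that were not excluded already. A sketch with a hypothetical group map in place of NetworkTopologyWithNodeGroup.getLeaves(..):

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class NodeGroupExclusionSketch {
  public static void main(String[] args) {
    // Hypothetical node-group membership; illustration only.
    Map<String, List<String>> groups = new HashMap<String, List<String>>();
    groups.put("/rack1/group0", Arrays.asList("dn0", "dn1"));

    Set<String> excluded = new HashSet<String>(Collections.singleton("dn1"));
    int countOfExcludedNodes = 0;
    for (String leaf : groups.get("/rack1/group0")) {
      if (excluded.add(leaf)) { // not an existing node in excludedNodes
        countOfExcludedNodes++;
      }
    }
    System.out.println(countOfExcludedNodes); // 1: dn1 was already excluded
  }
}
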
+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -230,7 +230,6 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
@@ -2450,7 +2449,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * client to "try again later".
    */
   LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
-      ExtendedBlock previous, HashMap<Node, Node> excludedNodes, 
+      ExtendedBlock previous, Set<Node> excludedNodes, 
       List<String> favoredNodes)
       throws LeaseExpiredException, NotReplicatedYetException,
       QuotaExceededException, SafeModeException, UnresolvedLinkException,
@@ -2654,7 +2653,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
   /** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */
   LocatedBlock getAdditionalDatanode(String src, final ExtendedBlock blk,
-      final DatanodeInfo[] existings,  final HashMap<Node, Node> excludes,
+      final DatanodeInfo[] existings,  final Set<Node> excludes,
       final int numAdditionalNodes, final String clientName
       ) throws IOException {
     //check if the feature is enabled

+ 8 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -29,8 +29,9 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -547,11 +548,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
       stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
           + " fileId=" + fileId + " for " + clientName);
     }
-    HashMap<Node, Node> excludedNodesSet = null;
+    Set<Node> excludedNodesSet = null;
     if (excludedNodes != null) {
-      excludedNodesSet = new HashMap<Node, Node>(excludedNodes.length);
+      excludedNodesSet = new HashSet<Node>(excludedNodes.length);
       for (Node node : excludedNodes) {
-        excludedNodesSet.put(node, node);
+        excludedNodesSet.add(node);
       }
     }
     List<String> favoredNodesList = (favoredNodes == null) ? null
@@ -579,11 +580,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
 
     metrics.incrGetAdditionalDatanodeOps();
 
-    HashMap<Node, Node> excludeSet = null;
+    Set<Node> excludeSet = null;
     if (excludes != null) {
-      excludeSet = new HashMap<Node, Node>(excludes.length);
+      excludeSet = new HashSet<Node>(excludes.length);
       for (Node node : excludes) {
-        excludeSet.put(node, node);
+        excludeSet.add(node);
       }
     }
     return namesystem.getAdditionalDatanode(src, blk,

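Both RPC entry points now turn the wire-level array into a HashSet; note that new HashSet<Node>(excludes.length) only pre-sizes the table, the elements still come from the loop. Under the same assumption as above (strings standing in for Node), the loop is equivalent to a one-liner:

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

public class ArrayToSetSketch {
  public static void main(String[] args) {
    String[] excludes = {"dn0", "dn1"};
    Set<String> excludeSet = null;
    if (excludes != null) {
      // Same result as the per-element add(..) loop in the patch.
      excludeSet = new HashSet<String>(Arrays.asList(excludes));
    }
    System.out.println(excludeSet);
  }
}
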
+ 14 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

@@ -29,9 +29,11 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
@@ -189,7 +191,7 @@ public class TestReplicationPolicy {
   }
 
   private static DatanodeDescriptor[] chooseTarget(int numOfReplicas,
-      List<DatanodeDescriptor> chosenNodes, Map<Node, Node> excludedNodes) {
+      List<DatanodeDescriptor> chosenNodes, Set<Node> excludedNodes) {
     return chooseTarget(numOfReplicas, dataNodes[0], chosenNodes, excludedNodes);
   }
 
@@ -197,7 +199,7 @@ public class TestReplicationPolicy {
       int numOfReplicas,
       DatanodeDescriptor writer,
       List<DatanodeDescriptor> chosenNodes,
-      Map<Node, Node> excludedNodes) {
+      Set<Node> excludedNodes) {
     return replicator.chooseTarget(filename, numOfReplicas, writer, chosenNodes,
         false, excludedNodes, BLOCK_SIZE);
   }
@@ -212,25 +214,25 @@ public class TestReplicationPolicy {
    */
   @Test
   public void testChooseTarget2() throws Exception { 
-    HashMap<Node, Node> excludedNodes;
+    Set<Node> excludedNodes;
     DatanodeDescriptor[] targets;
     List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
     
-    excludedNodes = new HashMap<Node, Node>();
-    excludedNodes.put(dataNodes[1], dataNodes[1]); 
+    excludedNodes = new HashSet<Node>();
+    excludedNodes.add(dataNodes[1]); 
     targets = chooseTarget(0, chosenNodes, excludedNodes);
     assertEquals(targets.length, 0);
     
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]); 
+    excludedNodes.add(dataNodes[1]); 
     targets = chooseTarget(1, chosenNodes, excludedNodes);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[0]);
     
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]); 
+    excludedNodes.add(dataNodes[1]); 
     targets = chooseTarget(2, chosenNodes, excludedNodes);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[0]);
@@ -238,7 +240,7 @@ public class TestReplicationPolicy {
     
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]); 
+    excludedNodes.add(dataNodes[1]); 
     targets = chooseTarget(3, chosenNodes, excludedNodes);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[0]);
@@ -247,7 +249,7 @@ public class TestReplicationPolicy {
     
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]); 
+    excludedNodes.add(dataNodes[1]); 
     targets = chooseTarget(4, chosenNodes, excludedNodes);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
@@ -260,7 +262,7 @@ public class TestReplicationPolicy {
 
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]); 
+    excludedNodes.add(dataNodes[1]); 
     chosenNodes.add(dataNodes[2]);
     targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
         excludedNodes, BLOCK_SIZE);
@@ -481,8 +483,8 @@ public class TestReplicationPolicy {
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[1]);
 
-    HashMap<Node, Node> excludedNodes = new HashMap<Node, Node>();
-    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    Set<Node> excludedNodes = new HashSet<Node>();
+    excludedNodes.add(dataNodes[1]);
     List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
     targets = chooseTarget(1, chosenNodes, excludedNodes);
     assertEquals(targets.length, 1);

+ 5 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java

@@ -181,7 +181,7 @@ public class TestReplicationPolicyWithNodeGroup {
       int numOfReplicas,
       DatanodeDescriptor writer,
       List<DatanodeDescriptor> chosenNodes,
-      Map<Node, Node> excludedNodes) {
+      Set<Node> excludedNodes) {
     return replicator.chooseTarget(filename, numOfReplicas, writer, chosenNodes,
         false, excludedNodes, BLOCK_SIZE);
   }
@@ -252,14 +252,13 @@ public class TestReplicationPolicyWithNodeGroup {
    * @throws Exception
    */
   @Test
-  public void testChooseTarget2() throws Exception { 
-    HashMap<Node, Node> excludedNodes;
+  public void testChooseTarget2() throws Exception {
     DatanodeDescriptor[] targets;
     BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator;
     List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
 
-    excludedNodes = new HashMap<Node, Node>();
-    excludedNodes.put(dataNodes[1], dataNodes[1]); 
+    Set<Node> excludedNodes = new HashSet<Node>();
+    excludedNodes.add(dataNodes[1]); 
     targets = repl.chooseTarget(filename, 4, dataNodes[0], chosenNodes, false, 
         excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 4);
@@ -275,7 +274,7 @@ public class TestReplicationPolicyWithNodeGroup {
 
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]); 
+    excludedNodes.add(dataNodes[1]); 
     chosenNodes.add(dataNodes[2]);
     targets = repl.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
         excludedNodes, BLOCK_SIZE);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java

@@ -25,7 +25,7 @@ import static org.mockito.Mockito.spy;
 
 import java.lang.reflect.Field;
 import java.util.EnumSet;
-import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -119,7 +119,7 @@ public class TestAddBlockRetry {
         return ret;
       }
     }).when(spyBM).chooseTarget(Mockito.anyString(), Mockito.anyInt(),
-        Mockito.<DatanodeDescriptor>any(), Mockito.<HashMap<Node, Node>>any(),
+        Mockito.<DatanodeDescriptor>any(), Mockito.<HashSet<Node>>any(),
         Mockito.anyLong(), Mockito.<List<String>>any());
 
     // create file

+ 0 - 2
hadoop-project/pom.xml

@@ -566,7 +566,6 @@
         <groupId>junit</groupId>
         <artifactId>junit</artifactId>
         <version>4.10</version>
-        <scope>test</scope>
       </dependency>
       <dependency>
         <groupId>commons-lang</groupId>
@@ -627,7 +626,6 @@
         <groupId>org.mockito</groupId>
         <artifactId>mockito-all</artifactId>
         <version>1.8.5</version>
-        <scope>test</scope>
       </dependency>
       <dependency>
         <groupId>org.apache.avro</groupId>

+ 5 - 0
hadoop-yarn-project/CHANGES.txt

@@ -106,6 +106,9 @@ Release 2.1.1-beta - UNRELEASED
     YARN-1137. Add support whitelist for system users to Yarn 
     container-executor.c. (rvs via tucu)
 
+    YARN-1001. Added a web-service to get statistics about per application-type
+    per state for consumption by downstream projects. (Zhijie Shen via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -211,6 +214,8 @@ Release 2.1.1-beta - UNRELEASED
     YARN-540. Race condition causing RM to potentially relaunch already
     unregistered AMs on RM restart (Jian He via bikas)
 
+    YARN-1184. ClassCastException during preemption enforcement. (cdouglas)
+
 Release 2.1.0-beta - 2013-08-22
 
   INCOMPATIBLE CHANGES

+ 5 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java

@@ -1609,9 +1609,12 @@ public class LeafQueue implements CSQueue {
 
   }
 
-  // need to access the list of apps from the preemption monitor
+  /**
+   * Obtain (read-only) collection of active applications.
+   */
   public Set<FiCaSchedulerApp> getApplications() {
-    return Collections.unmodifiableSet(activeApplications);
+    // need to access the list of apps from the preemption monitor
+    return activeApplications;
   }
 
  // return a single Resource capturing the overall amount of pending resources

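The LeafQueue change (YARN-1184) drops the Collections.unmodifiableSet wrapper: the wrapper implements only Set, so a caller that downcasts the returned collection to a sorted type fails at runtime with the ClassCastException named in the CHANGES entry. A minimal reproduction under the assumption that the concrete set is ordered (TreeSet and the NavigableSet cast here are illustrative, not necessarily the exact types involved):

import java.util.Collections;
import java.util.NavigableSet;
import java.util.Set;
import java.util.TreeSet;

public class UnmodifiableCastSketch {
  public static void main(String[] args) {
    Set<String> active = new TreeSet<String>(); // ordered, like activeApplications
    Set<String> wrapped = Collections.unmodifiableSet(active);

    try {
      // The wrapper is a plain Set view; the ordering interface is lost.
      NavigableSet<String> sorted = (NavigableSet<String>) wrapped;
      System.out.println(sorted.descendingSet());
    } catch (ClassCastException e) {
      System.out.println("caught: " + e); // thrown at the cast above
    }
  }
}

Returning the live set restores the cast but hands callers mutable access, which is presumably why the note about the preemption monitor moves inside the method body.
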
+ 4 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java

@@ -34,12 +34,14 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfoList;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.StatisticsItemInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
@@ -65,7 +67,8 @@ public class JAXBContextResolver implements ContextResolver<JAXBContext> {
       CapacitySchedulerInfo.class, ClusterMetricsInfo.class,
       SchedulerInfo.class, AppsInfo.class, NodesInfo.class,
       RemoteExceptionData.class, CapacitySchedulerQueueInfoList.class,
-      ResourceInfo.class, UsersInfo.class, UserInfo.class};
+      ResourceInfo.class, UsersInfo.class, UserInfo.class,
+      ApplicationStatisticsInfo.class, StatisticsItemInfo.class};
 
   public JAXBContextResolver() throws Exception {
     this.types = new HashSet<Class>(Arrays.asList(cTypes));

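Jersey can only render the new DAO types if the JAXBContext knows about them, which is why the patch appends ApplicationStatisticsInfo and StatisticsItemInfo to cTypes. A standalone sketch of the registration-and-marshal round trip, with a hypothetical Item class in place of StatisticsItemInfo:

import java.io.StringWriter;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import javax.xml.bind.annotation.XmlRootElement;

public class JaxbRegistrationSketch {
  @XmlRootElement(name = "statItem")
  public static class Item { // hypothetical stand-in for StatisticsItemInfo
    public String state = "RUNNING";
    public String type = "mapreduce";
    public long count = 1;
  }

  public static void main(String[] args) throws Exception {
    // A class absent from the context would fail to marshal at request time.
    JAXBContext ctx = JAXBContext.newInstance(Item.class);
    Marshaller m = ctx.createMarshaller();
    StringWriter out = new StringWriter();
    m.marshal(new Item(), out);
    System.out.println(out); // a single <statItem>...</statItem> document
  }
}
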
+ 128 - 44
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java

@@ -22,7 +22,9 @@ import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EnumSet;
+import java.util.HashMap;
 import java.util.HashSet;
+import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 
@@ -42,12 +44,12 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.NodeState;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
-import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
@@ -58,6 +60,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoSchedule
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppAttemptsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationStatisticsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
@@ -68,6 +71,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.StatisticsItemInfo;
 import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
@@ -80,6 +84,7 @@ import com.google.inject.Singleton;
 @Path("/ws/v1/cluster")
 public class RMWebServices {
   private static final String EMPTY = "";
+  private static final String ANY = "*";
   private final ResourceManager rm;
   private static RecordFactory recordFactory = RecordFactoryProvider
       .getRecordFactory(null);
@@ -303,53 +308,16 @@ public class RMWebServices {
           "finishTimeEnd must be greater than finishTimeBegin");
     }
 
-    Set<String> appTypes = new HashSet<String>();
-    if (!applicationTypes.isEmpty()) {
-      for (String applicationType : applicationTypes) {
-        if (applicationType != null && !applicationType.trim().isEmpty()) {
-          if (applicationType.indexOf(",") == -1) {
-            appTypes.add(applicationType.trim());
-          } else {
-            String[] types = applicationType.split(",");
-            for (String type : types) {
-              if (!type.trim().isEmpty()) {
-                appTypes.add(type.trim());
-              }
-            }
-          }
-        }
-      }
-    }
+    Set<String> appTypes = parseQueries(applicationTypes, false);
     if (!appTypes.isEmpty()) {
       checkAppTypes = true;
     }
 
-    String allAppStates;
-    RMAppState[] stateArray = RMAppState.values();
-    allAppStates = Arrays.toString(stateArray);
-
-    Set<String> appStates = new HashSet<String>();
     // stateQuery is deprecated.
     if (stateQuery != null && !stateQuery.isEmpty()) {
       statesQuery.add(stateQuery);
     }
-    if (!statesQuery.isEmpty()) {
-      for (String applicationState : statesQuery) {
-        if (applicationState != null && !applicationState.isEmpty()) {
-          String[] states = applicationState.split(",");
-          for (String state : states) {
-            try {
-              RMAppState.valueOf(state.trim());
-            } catch (IllegalArgumentException iae) {
-              throw new BadRequestException(
-                  "Invalid application-state " + state
-                  + " specified. It should be one of " + allAppStates);
-            }
-            appStates.add(state.trim().toLowerCase());
-          }
-        }
-      }
-    }
+    Set<String> appStates = parseQueries(statesQuery, true);
     if (!appStates.isEmpty()) {
       checkAppStates = true;
     }
@@ -363,8 +331,8 @@ public class RMWebServices {
         break;
       }
 
-      if (checkAppStates
-          && !appStates.contains(rmapp.getState().toString().toLowerCase())) {
+      if (checkAppStates && !appStates.contains(
+          rmapp.createApplicationState().toString().toLowerCase())) {
         continue;
       }
       if (finalStatusQuery != null && !finalStatusQuery.isEmpty()) {
@@ -394,8 +362,8 @@ public class RMWebServices {
           continue;
         }
       }
-      if (checkAppTypes
-          && !appTypes.contains(rmapp.getApplicationType())) {
+      if (checkAppTypes && !appTypes.contains(
+          rmapp.getApplicationType().trim().toLowerCase())) {
         continue;
       }
 
@@ -415,6 +383,122 @@ public class RMWebServices {
     return allApps;
   }
 
+  @GET
+  @Path("/appstatistics")
+  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
+  public ApplicationStatisticsInfo getAppStatistics(
+      @Context HttpServletRequest hsr,
+      @QueryParam("states") Set<String> stateQueries,
+      @QueryParam("applicationTypes") Set<String> typeQueries) {
+    init();
+
+    // parse the params and build the scoreboard
+    // converting state/type name to lowercase
+    Set<String> states = parseQueries(stateQueries, true);
+    Set<String> types = parseQueries(typeQueries, false);
+    // if no types, counts the applications of any types
+    if (types.size() == 0) {
+      types.add(ANY);
+    } else if (types.size() != 1) {
+      throw new BadRequestException("# of applicationTypes = " + types.size()
+          + ", we temporarily support at most one applicationType");
+    }
+    // if no states, returns the counts of all RMAppStates
+    if (states.size() == 0) {
+      for (YarnApplicationState state : YarnApplicationState.values()) {
+        states.add(state.toString().toLowerCase());
+      }
+    }
+    // in case we extend to multiple applicationTypes in the future
+    Map<YarnApplicationState, Map<String, Long>> scoreboard =
+        buildScoreboard(states, types);
+
+    // go through the apps in RM to count the numbers, ignoring the case of
+    // the state/type name
+    ConcurrentMap<ApplicationId, RMApp> apps = rm.getRMContext().getRMApps();
+    for (RMApp rmapp : apps.values()) {
+      YarnApplicationState state = rmapp.createApplicationState();
+      String type = rmapp.getApplicationType().trim().toLowerCase();
+      if (states.contains(state.toString().toLowerCase())) {
+        if (types.contains(ANY)) {
+          countApp(scoreboard, state, ANY);
+        } else if (types.contains(type)) {
+          countApp(scoreboard, state, type);
+        }
+      }
+    }
+
+    // fill the response object
+    ApplicationStatisticsInfo appStatInfo = new ApplicationStatisticsInfo();
+    for (Map.Entry<YarnApplicationState, Map<String, Long>> partScoreboard
+        : scoreboard.entrySet()) {
+      for (Map.Entry<String, Long> statEntry
+          : partScoreboard.getValue().entrySet()) {
+        StatisticsItemInfo statItem = new StatisticsItemInfo(
+            partScoreboard.getKey(), statEntry.getKey(), statEntry.getValue());
+        appStatInfo.add(statItem);
+      }
+    }
+    return appStatInfo;
+  }
+
+  private static Set<String> parseQueries(
+      Set<String> queries, boolean isState) {
+    Set<String> params = new HashSet<String>();
+    if (!queries.isEmpty()) {
+      for (String query : queries) {
+        if (query != null && !query.trim().isEmpty()) {
+          String[] paramStrs = query.split(",");
+          for (String paramStr : paramStrs) {
+            if (paramStr != null && !paramStr.trim().isEmpty()) {
+              if (isState) {
+                try {
+                  // enum string is in the uppercase
+                  YarnApplicationState.valueOf(paramStr.trim().toUpperCase());
+                } catch (RuntimeException e) {
+                  YarnApplicationState[] stateArray =
+                      YarnApplicationState.values();
+                  String allAppStates = Arrays.toString(stateArray);
+                  throw new BadRequestException(
+                      "Invalid application-state " + paramStr.trim()
+                      + " specified. It should be one of " + allAppStates);
+                }
+              }
+              params.add(paramStr.trim().toLowerCase());
+            }
+          }
+        }
+      }
+    }
+    return params;
+  }
+
+  private static Map<YarnApplicationState, Map<String, Long>> buildScoreboard(
+     Set<String> states, Set<String> types) {
+    Map<YarnApplicationState, Map<String, Long>> scoreboard
+        = new HashMap<YarnApplicationState, Map<String, Long>>();
+    // default states will result in enumerating all YarnApplicationStates
+    assert !states.isEmpty();
+    for (String state : states) {
+      Map<String, Long> partScoreboard = new HashMap<String, Long>();
+      scoreboard.put(
+          YarnApplicationState.valueOf(state.toUpperCase()), partScoreboard);
+      // types is verified not to be empty
+      for (String type : types) {
+        partScoreboard.put(type, 0L);
+      }
+    }
+    return scoreboard;
+  }
+
+  private static void countApp(
+      Map<YarnApplicationState, Map<String, Long>> scoreboard,
+      YarnApplicationState state, String type) {
+    Map<String, Long> partScoreboard = scoreboard.get(state);
+    Long count = partScoreboard.get(type);
+    partScoreboard.put(type, count + 1L);
+  }
+
   @GET
   @Path("/apps/{appid}")
   @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })

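The new /appstatistics handler pre-builds a state-by-type scoreboard of zeroed counters, then bumps one cell per matching application. A compact, runnable sketch of that accounting (a three-value enum stands in for YarnApplicationState; two type buckets are shown, although the endpoint currently accepts at most one applicationType):

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ScoreboardSketch {
  enum AppState { ACCEPTED, RUNNING, FINISHED } // stand-in enum

  public static void main(String[] args) {
    Set<String> types = new HashSet<String>(Arrays.asList("mapreduce", "*"));

    // Build: one zeroed counter per (state, type) cell.
    Map<AppState, Map<String, Long>> scoreboard =
        new HashMap<AppState, Map<String, Long>>();
    for (AppState state : AppState.values()) {
      Map<String, Long> part = new HashMap<String, Long>();
      for (String type : types) {
        part.put(type, 0L);
      }
      scoreboard.put(state, part);
    }

    // Count: bump the matching cell for each observed application.
    countApp(scoreboard, AppState.RUNNING, "mapreduce");
    countApp(scoreboard, AppState.RUNNING, "mapreduce");
    System.out.println(scoreboard.get(AppState.RUNNING)); // mapreduce=2, *=0
  }

  static void countApp(Map<AppState, Map<String, Long>> scoreboard,
      AppState state, String type) {
    Map<String, Long> part = scoreboard.get(state);
    part.put(type, part.get(type) + 1L);
  }
}
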
+ 44 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ApplicationStatisticsInfo.java

@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import java.util.ArrayList;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+@XmlRootElement(name = "appStatInfo")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class ApplicationStatisticsInfo {
+
+  protected ArrayList<StatisticsItemInfo> statItem
+      = new ArrayList<StatisticsItemInfo>();
+
+  public ApplicationStatisticsInfo() {
+  } // JAXB needs this
+
+  public void add(StatisticsItemInfo statItem) {
+    this.statItem.add(statItem);
+  }
+
+  public ArrayList<StatisticsItemInfo> getStatItems() {
+    return statItem;
+  }
+
+}

+ 56 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/StatisticsItemInfo.java

@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;
+
+import javax.xml.bind.annotation.XmlAccessType;
+import javax.xml.bind.annotation.XmlAccessorType;
+import javax.xml.bind.annotation.XmlRootElement;
+
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
+
+@XmlRootElement(name = "statItem")
+@XmlAccessorType(XmlAccessType.FIELD)
+public class StatisticsItemInfo {
+
+  protected YarnApplicationState state;
+  protected String type;
+  protected long count;
+
+  public StatisticsItemInfo() {
+  } // JAXB needs this
+
+  public StatisticsItemInfo(
+      YarnApplicationState state, String type, long count) {
+    this.state = state;
+    this.type = type;
+    this.count = count;
+  }
+
+  public YarnApplicationState getState() {
+    return state;
+  }
+
+  public String getType() {
+    return type;
+  }
+
+  public long getCount() {
+    return count;
+  }
+
+}

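With both DAO classes in place, the endpoint can be exercised over HTTP. A hedged client sketch (the host and port are illustrative; the response shape in the comment follows the DAO annotations above and the assertions in the test below):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class AppStatisticsClientSketch {
  public static void main(String[] args) throws Exception {
    URL url = new URL("http://localhost:8088/ws/v1/cluster/appstatistics"
        + "?states=running&applicationTypes=mapreduce");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");

    // Expected body shape, per the @XmlRootElement names above:
    // {"appStatInfo":{"statItem":[{"state":"RUNNING","type":"mapreduce","count":1}]}}
    BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()));
    String line;
    while ((line = in.readLine()) != null) {
      System.out.println(line);
    }
    in.close();
  }
}
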
+ 176 - 9
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ContainerState;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
@@ -227,7 +228,8 @@ public class TestRMWebServicesApps extends JerseyTest {
     WebResource r = resource();
 
     ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("apps").queryParam("state", RMAppState.ACCEPTED.toString())
+        .path("apps")
+        .queryParam("state", YarnApplicationState.ACCEPTED.toString())
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
     JSONObject json = response.getEntity(JSONObject.class);
@@ -252,7 +254,7 @@ public class TestRMWebServicesApps extends JerseyTest {
 
     WebResource r = resource();
     MultivaluedMapImpl params = new MultivaluedMapImpl();
-    params.add("states", RMAppState.ACCEPTED.toString());
+    params.add("states", YarnApplicationState.ACCEPTED.toString());
     ClientResponse response = r.path("ws").path("v1").path("cluster")
         .path("apps").queryParams(params)
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
@@ -268,8 +270,8 @@ public class TestRMWebServicesApps extends JerseyTest {
 
     r = resource();
     params = new MultivaluedMapImpl();
-    params.add("states", RMAppState.ACCEPTED.toString());
-    params.add("states", RMAppState.KILLED.toString());
+    params.add("states", YarnApplicationState.ACCEPTED.toString());
+    params.add("states", YarnApplicationState.KILLED.toString());
     response = r.path("ws").path("v1").path("cluster")
         .path("apps").queryParams(params)
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
@@ -301,7 +303,7 @@ public class TestRMWebServicesApps extends JerseyTest {
 
     WebResource r = resource();
     MultivaluedMapImpl params = new MultivaluedMapImpl();
-    params.add("states", RMAppState.ACCEPTED.toString());
+    params.add("states", YarnApplicationState.ACCEPTED.toString());
     ClientResponse response = r.path("ws").path("v1").path("cluster")
         .path("apps").queryParams(params)
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
@@ -317,8 +319,8 @@ public class TestRMWebServicesApps extends JerseyTest {
 
     r = resource();
     params = new MultivaluedMapImpl();
-    params.add("states", RMAppState.ACCEPTED.toString() + ","
-        + RMAppState.KILLED.toString());
+    params.add("states", YarnApplicationState.ACCEPTED.toString() + ","
+        + YarnApplicationState.KILLED.toString());
     response = r.path("ws").path("v1").path("cluster")
         .path("apps").queryParams(params)
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
@@ -347,7 +349,8 @@ public class TestRMWebServicesApps extends JerseyTest {
     WebResource r = resource();
 
     ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("apps").queryParam("states", RMAppState.RUNNING.toString())
+        .path("apps")
+        .queryParam("states", YarnApplicationState.RUNNING.toString())
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
     JSONObject json = response.getEntity(JSONObject.class);
@@ -365,7 +368,8 @@ public class TestRMWebServicesApps extends JerseyTest {
     WebResource r = resource();
 
     ClientResponse response = r.path("ws").path("v1").path("cluster")
-        .path("apps").queryParam("state", RMAppState.RUNNING.toString())
+        .path("apps")
+        .queryParam("state", YarnApplicationState.RUNNING.toString())
         .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
     assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
     JSONObject json = response.getEntity(JSONObject.class);
@@ -975,6 +979,169 @@ public class TestRMWebServicesApps extends JerseyTest {
     rm.stop();
   }
 
+  @Test
+  public void testAppStatistics() throws JSONException, Exception {
+    try {
+      rm.start();
+      MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 4096);
+      Thread.sleep(1);
+      RMApp app1 = rm.submitApp(1024, "", UserGroupInformation.getCurrentUser()
+          .getShortUserName(), null, false, null, 2, null, "MAPREDUCE");
+      amNodeManager.nodeHeartbeat(true);
+      // finish App
+      MockAM am = rm
+          .sendAMLaunched(app1.getCurrentAppAttempt().getAppAttemptId());
+      am.registerAppAttempt();
+      am.unregisterAppAttempt();
+      amNodeManager.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(),
+          1, ContainerState.COMPLETE);
+
+      rm.submitApp(1024, "", UserGroupInformation.getCurrentUser()
+          .getShortUserName(), null, false, null, 2, null, "MAPREDUCE");
+      rm.submitApp(1024, "", UserGroupInformation.getCurrentUser()
+          .getShortUserName(), null, false, null, 2, null, "OTHER");
+
+      // zero type, zero state
+      WebResource r = resource();
+      ClientResponse response = r.path("ws").path("v1").path("cluster")
+          .path("appstatistics")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      JSONObject json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject appsStatInfo = json.getJSONObject("appStatInfo");
+      assertEquals("incorrect number of elements", 1, appsStatInfo.length());
+      JSONArray statItems = appsStatInfo.getJSONArray("statItem");
+      assertEquals("incorrect number of elements",
+          YarnApplicationState.values().length, statItems.length());
+      for (int i = 0; i < YarnApplicationState.values().length; ++i) {
+        JSONObject statItem = statItems.getJSONObject(i);
+        assertEquals("*", statItem.getString("type"));
+        if (statItem.getString("state").equals("ACCEPTED")) {
+          assertEquals("2", statItem.getString("count"));
+        } else if (statItem.getString("state").equals("FINISHED")) {
+          assertEquals("1", statItem.getString("count"));
+        } else {
+          assertEquals("0", statItem.getString("count"));
+        }
+      }
+
+      // zero type, one state
+      r = resource();
+      response = r.path("ws").path("v1").path("cluster")
+          .path("appstatistics")
+          .queryParam("states", YarnApplicationState.ACCEPTED.toString())
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      appsStatInfo = json.getJSONObject("appStatInfo");
+      assertEquals("incorrect number of elements", 1, appsStatInfo.length());
+      statItems = appsStatInfo.getJSONArray("statItem");
+      assertEquals("incorrect number of elements", 1, statItems.length());
+      assertEquals("ACCEPTED", statItems.getJSONObject(0).getString("state"));
+      assertEquals("*", statItems.getJSONObject(0).getString("type"));
+      assertEquals("2", statItems.getJSONObject(0).getString("count"));
+
+      // one type, zero state
+      r = resource();
+      response = r.path("ws").path("v1").path("cluster")
+          .path("appstatistics")
+          .queryParam("applicationTypes", "MAPREDUCE")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      appsStatInfo = json.getJSONObject("appStatInfo");
+      assertEquals("incorrect number of elements", 1, appsStatInfo.length());
+      statItems = appsStatInfo.getJSONArray("statItem");
+      assertEquals("incorrect number of elements",
+          YarnApplicationState.values().length, statItems.length());
+      for (int i = 0; i < YarnApplicationState.values().length; ++i) {
+        JSONObject statItem = statItems.getJSONObject(i);
+        assertEquals("mapreduce", statItem.getString("type"));
+        if (statItem.getString("state").equals("ACCEPTED")
+            || statItem.getString("state").equals("FINISHED")) {
+          assertEquals("1", statItem.getString("count"));
+        } else {
+          assertEquals("0", statItem.getString("count"));
+        }
+      }
+
+      // two types, zero state
+      r = resource();
+      response = r.path("ws").path("v1").path("cluster")
+          .path("appstatistics")
+          .queryParam("applicationTypes", "MAPREDUCE,OTHER")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      JSONObject exception = json.getJSONObject("RemoteException");
+      assertEquals("incorrect number of elements", 3, exception.length());
+      String message = exception.getString("message");
+      String type = exception.getString("exception");
+      String className = exception.getString("javaClassName");
+      WebServicesTestUtils.checkStringContains("exception message",
+          "we temporarily support at most one applicationType", message);
+      WebServicesTestUtils.checkStringEqual("exception type",
+          "BadRequestException", type);
+      WebServicesTestUtils.checkStringEqual("exception className",
+          "org.apache.hadoop.yarn.webapp.BadRequestException", className);
+
+      // one type, two states
+      r = resource();
+      response = r.path("ws").path("v1").path("cluster")
+          .path("appstatistics")
+          .queryParam("states", YarnApplicationState.FINISHED.toString()
+              + "," + YarnApplicationState.ACCEPTED.toString())
+          .queryParam("applicationTypes", "MAPREDUCE")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      appsStatInfo = json.getJSONObject("appStatInfo");
+      assertEquals("incorrect number of elements", 1, appsStatInfo.length());
+      statItems = appsStatInfo.getJSONArray("statItem");
+      assertEquals("incorrect number of elements", 2, statItems.length());
+      JSONObject statItem1 = statItems.getJSONObject(0);
+      JSONObject statItem2 = statItems.getJSONObject(1);
+      assertTrue((statItem1.getString("state").equals("ACCEPTED") &&
+          statItem2.getString("state").equals("FINISHED")) ||
+          (statItem2.getString("state").equals("ACCEPTED") &&
+          statItem1.getString("state").equals("FINISHED")));
+      assertEquals("mapreduce", statItem1.getString("type"));
+      assertEquals("1", statItem1.getString("count"));
+      assertEquals("mapreduce", statItem2.getString("type"));
+      assertEquals("1", statItem2.getString("count"));
+
+      // invalid state
+      r = resource();
+      response = r.path("ws").path("v1").path("cluster")
+          .path("appstatistics").queryParam("states", "wrong_state")
+          .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+      assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus());
+      assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+      json = response.getEntity(JSONObject.class);
+      assertEquals("incorrect number of elements", 1, json.length());
+      exception = json.getJSONObject("RemoteException");
+      assertEquals("incorrect number of elements", 3, exception.length());
+      message = exception.getString("message");
+      type = exception.getString("exception");
+      className = exception.getString("javaClassName");
+      WebServicesTestUtils.checkStringContains("exception message",
+          "Invalid application-state wrong_state", message);
+      WebServicesTestUtils.checkStringEqual("exception type",
+          "BadRequestException", type);
+      WebServicesTestUtils.checkStringEqual("exception className",
+          "org.apache.hadoop.yarn.webapp.BadRequestException", className);
+    } finally {
+      rm.stop();
+    }
+  }
+
   @Test
   public void testSingleApp() throws JSONException, Exception {
     rm.start();

+ 123 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm

@@ -1269,6 +1269,129 @@ _01_000001</amContainerLogs>
 
 +---+
 
+* Cluster Application Statistics API
+
+  With the Application Statistics API, you can obtain a collection of triples, each of which contains the application type, the application state, and the number of applications of that type in that state in the ResourceManager's context. Note that, due to performance concerns, we currently support at most one applicationType per query; multiple applicationTypes per query, as well as additional statistics, may be supported in the future. When you run a GET operation on this resource, you obtain a collection of statItem objects.
+
+** URI
+
+------
+  * http://<rm http address:port>/ws/v1/cluster/appstatistics
+------
+
+** HTTP Operations Supported
+
+------
+  * GET
+------
+
+** Query Parameters Supported
+
+  Two parameters can be specified. The parameters are case insensitive.
+
+------
+  * states - states of the applications, specified as a comma-separated list. If states is not provided, the API will enumerate all application states and return the count for each of them.
+  * applicationTypes - types of the applications, specified as a comma-separated list. If applicationTypes is not provided, the API will count the applications of any application type. In this case, the response shows * to indicate any application type. Note that at most one applicationType is currently supported; specifying more than one results in a BadRequestException.
+------
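+
+  For example, the following request asks for a single statItem counting running MAPREDUCE applications (the host and port are placeholders, as above):
+
+------
+  GET http://<rm http address:port>/ws/v1/cluster/appstatistics?states=RUNNING&applicationTypes=MAPREDUCE
+------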
+
+** Elements of the <appStatInfo> (statItems) object
+
+  When you make a request for the list of statistics items, the information will be returned as a collection of statItem objects.
+
+*-----------+----------------------------------------------------------------------+-------------------------------------+
+|| Item     || Data Type                                                           || Description                        |
+*-----------+----------------------------------------------------------------------+-------------------------------------+
+| statItem  | array of statItem objects (JSON)/zero or more statItem objects (XML) | The collection of statItem objects  |
+*-----------+----------------------------------------------------------------------+-------------------------------------+
+
+** Response Examples
+
+  <<JSON response>>
+
+  HTTP Request:
+
+------
+  GET http://<rm http address:port>/ws/v1/cluster/appstatistics?states=accepted,running,finished&applicationTypes=mapreduce
+------
+
+  Response Header:
+
++---+
+  HTTP/1.1 200 OK
+  Content-Type: application/json
+  Transfer-Encoding: chunked
+  Server: Jetty(6.1.26)
++---+
+
+  Response Body:
+
++---+
+{
+  "appStatInfo":
+  {
+    "statItem":
+    [
+       {
+          "state" : "accepted",
+          "type" : "mapreduce",
+          "count" : 4
+       },
+       {
+          "state" : "running",
+          "type" : "mapreduce",
+          "count" : 1
+       },
+       {
+          "state" : "finished",
+          "type" : "mapreduce",
+          "count" : 7
+       }
+    ]
+  }
+}
++---+
+
+  <<XML response>>
+
+  HTTP Request:
+
+------
+  GET http://<rm http address:port>/ws/v1/cluster/appstatistics?states=accepted,running,finished&applicationTypes=mapreduce
+  Accept: application/xml
+------
+
+  Response Header:
+
++---+
+  HTTP/1.1 200 OK
+  Content-Type: application/xml
+  Content-Length: 2459
+  Server: Jetty(6.1.26)
++---+
+
+  Response Body:
+
++---+
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<appStatInfo>
+  <statItem>
+    <state>accepted</state>
+    <type>mapreduce</type>
+    <count>4</count>
+  </statItem>
+  <statItem>
+    <state>running</state>
+    <type>mapreduce</type>
+    <count>1</count>
+  </statItem>
+  <statItem>
+    <state>finished</state>
+    <type>mapreduce</type>
+    <count>7</count>
+  </statItem>
+</appStatInfo>
++---+
+
 * Cluster {Application API}
 
   An application resource contains information about a particular application that was submitted to a cluster.