
Merge trunk to branch-trunk-win

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-trunk-win@1441218 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 12 years ago
commit e77e85507b

+ 3 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -325,6 +325,9 @@ Trunk (Unreleased)
     HADOOP-9249. hadoop-maven-plugins version-info goal causes build failure
     when running with Clover. (Chris Nauroth via suresh)
 
+    HADOOP-9264. Port change to use Java untar API on Windows from 
+    branch-1-win to trunk. (Chris Nauroth via suresh)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -314,6 +314,10 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4369. GetBlockKeysResponseProto does not handle null response.
     (suresh)
 
+    HDFS-4451. hdfs balancer command returns exit code 1 on success instead
+    of 0. (Joshua Blatt via suresh)
+
+
   NEW FEATURES
 
     HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS.

+ 9 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -1333,8 +1333,9 @@ public class Balancer {
 
   // Exit status
   enum ReturnStatus {
-    SUCCESS(1),
-    IN_PROGRESS(0),
+    // These int values will map directly to the balancer process's exit code.
+    SUCCESS(0),
+    IN_PROGRESS(1),
     ALREADY_RUNNING(-1),
     NO_MOVE_BLOCK(-2),
     NO_MOVE_PROGRESS(-3),
@@ -1507,7 +1508,12 @@ public class Balancer {
   }
 
   static class Cli extends Configured implements Tool {
-    /** Parse arguments and then run Balancer */
+    /**
+     * Parse arguments and then run Balancer.
+     * 
+     * @param args command specific arguments.
+     * @return exit code. 0 indicates success, non-zero indicates failure.
+     */
     @Override
     public int run(String[] args) {
       final long startTime = Time.now();

+ 43 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -46,8 +46,10 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.server.balancer.Balancer.Cli;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.util.Time;
+import org.apache.hadoop.util.Tool;
 import org.junit.Test;
 
 /**
@@ -95,7 +97,6 @@ public class TestBalancer {
     DFSTestUtil.waitReplication(fs, filePath, replicationFactor);
   }
 
-
   /* fill up a cluster with <code>numNodes</code> datanodes 
    * whose used space to be <code>size</code>
    */
@@ -301,10 +302,12 @@ public class TestBalancer {
    * @param racks - array of racks for original nodes in cluster
    * @param newCapacity - new node's capacity
    * @param newRack - new node's rack
+   * @param useTool - if true run test via Cli with command-line argument 
+   *   parsing, etc.   Otherwise invoke balancer API directly.
    * @throws Exception
    */
   private void doTest(Configuration conf, long[] capacities, String[] racks, 
-      long newCapacity, String newRack) throws Exception {
+      long newCapacity, String newRack, boolean useTool) throws Exception {
     assertEquals(capacities.length, racks.length);
     int numOfDatanodes = capacities.length;
     cluster = new MiniDFSCluster.Builder(conf)
@@ -330,7 +333,11 @@ public class TestBalancer {
       totalCapacity += newCapacity;
 
       // run balancer and validate results
-      runBalancer(conf, totalUsedSpace, totalCapacity);
+      if (useTool) {
+        runBalancerCli(conf, totalUsedSpace, totalCapacity);
+      } else {
+        runBalancer(conf, totalUsedSpace, totalCapacity);
+      }
     } finally {
       cluster.shutdown();
     }
@@ -350,22 +357,38 @@ public class TestBalancer {
     waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
   }
   
+  private void runBalancerCli(Configuration conf,
+      long totalUsedSpace, long totalCapacity) throws Exception {
+    waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
+
+    final String[] args = { "-policy", "datanode" };
+    final Tool tool = new Cli();    
+    tool.setConf(conf);
+    final int r = tool.run(args); // start rebalancing
+    
+    assertEquals("Tools should exit 0 on success", 0, r);
+    waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster);
+    LOG.info("Rebalancing with default ctor.");
+    waitForBalancer(totalUsedSpace, totalCapacity, client, cluster);
+  }
+  
   /** one-node cluster test*/
-  private void oneNodeTest(Configuration conf) throws Exception {
+  private void oneNodeTest(Configuration conf, boolean useTool) throws Exception {
     // add an empty node with half of the CAPACITY & the same rack
-    doTest(conf, new long[]{CAPACITY}, new String[]{RACK0}, CAPACITY/2, RACK0);
+    doTest(conf, new long[]{CAPACITY}, new String[]{RACK0}, CAPACITY/2, 
+            RACK0, useTool);
   }
   
   /** two-node cluster test */
   private void twoNodeTest(Configuration conf) throws Exception {
     doTest(conf, new long[]{CAPACITY, CAPACITY}, new String[]{RACK0, RACK1},
-        CAPACITY, RACK2);
+        CAPACITY, RACK2, false);
   }
   
   /** test using a user-supplied conf */
   public void integrationTest(Configuration conf) throws Exception {
     initConf(conf);
-    oneNodeTest(conf);
+    oneNodeTest(conf, false);
   }
   
   /**
@@ -401,7 +424,7 @@ public class TestBalancer {
   
   void testBalancer0Internal(Configuration conf) throws Exception {
     initConf(conf);
-    oneNodeTest(conf);
+    oneNodeTest(conf, false);
     twoNodeTest(conf);
   }
 
@@ -495,7 +518,18 @@ public class TestBalancer {
 
   }
 
-
+  /**
+   * Verify balancer exits 0 on success.
+   */
+  @Test(timeout=100000)
+  public void testExitZeroOnSuccess() throws Exception {
+    final Configuration conf = new HdfsConfiguration();
+    
+    initConf(conf);
+    
+    oneNodeTest(conf, true);
+  }
+  
   /**
    * @param args
    */
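The new runBalancerCli path exercises the same code that external automation hits when it shells out to the balancer. A hypothetical sketch (not part of this commit; it assumes the hdfs command is on the PATH) of driving the command from Java and relying on the now-correct exit status:

import java.io.IOException;

// Hypothetical driver for the balancer CLI.
public class BalancerDriverSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    Process p = new ProcessBuilder("hdfs", "balancer", "-policy", "datanode")
        .inheritIO()   // stream balancer output to this process's stdout/stderr
        .start();
    int exit = p.waitFor();
    // With HDFS-4451 in place, 0 reliably means the balancer finished successfully.
    if (exit != 0) {
      System.err.println("Balancer exited with code " + exit);
    }
  }
}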