
Merge -r 765950:765951 from trunk onto 0.20 branch. Fixes HADOOP-5646.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.20@765952 13f79535-47bb-0310-9956-ffa450edef68
Devaraj Das 16 years ago
parent
commit
942d51509a

+ 3 - 0
CHANGES.txt

@@ -875,6 +875,9 @@ Release 0.20.0 - 2009-04-15
     HADOOP-5655. TestMRServerPorts fails on java.net.BindException. (Devaraj
     Das via hairong)
 
+    HADOOP-5646. Fixes a problem in TestQueueCapacities. 
+    (Vinod Kumar Vavilapalli via ddas)
+
 Release 0.19.2 - Unreleased
 
   BUG FIXES

+ 8 - 17
src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/ClusterWithCapacityScheduler.java

@@ -47,7 +47,6 @@ public class ClusterWithCapacityScheduler extends TestCase {
 
   static final Log LOG = LogFactory.getLog(ClusterWithCapacityScheduler.class);
   private MiniMRCluster mrCluster;
-  private MiniDFSCluster dfsCluster;
 
   private JobConf jobConf;
 
@@ -71,7 +70,7 @@ public class ClusterWithCapacityScheduler extends TestCase {
   protected void startCluster(Properties clusterProperties,
       Properties schedulerProperties)
       throws IOException {
-    startCluster(2, 2, clusterProperties, schedulerProperties);
+    startCluster(2, clusterProperties, schedulerProperties);
   }
 
   /**
@@ -81,12 +80,11 @@ public class ClusterWithCapacityScheduler extends TestCase {
    * user provided properties are missing (null/empty)
    * 
    * @param numTaskTrackers
-   * @param numDataNodes
    * @param clusterProperties
    * @param schedulerProperties
    * @throws IOException
    */
-  protected void startCluster(int numTaskTrackers, int numDataNodes,
+  protected void startCluster(int numTaskTrackers,
       Properties clusterProperties, Properties schedulerProperties)
       throws IOException {
     Thread.currentThread().setContextClassLoader(
@@ -99,7 +97,6 @@ public class ClusterWithCapacityScheduler extends TestCase {
         clusterConf.set(key, (String) clusterProperties.get(key));
       }
     }
-    dfsCluster = new MiniDFSCluster(clusterConf, numDataNodes, true, null);
 
     if (schedulerProperties != null) {
       setUpSchedulerConfigFile(schedulerProperties);
@@ -108,23 +105,22 @@ public class ClusterWithCapacityScheduler extends TestCase {
     clusterConf.set("mapred.jobtracker.taskScheduler",
         CapacityTaskScheduler.class.getName());
     mrCluster =
-        new MiniMRCluster(numTaskTrackers, dfsCluster.getFileSystem().getUri()
-            .toString(), 1, null, null, clusterConf);
+        new MiniMRCluster(numTaskTrackers, "file:///", 1, null, null,
+            clusterConf);
 
     this.jobConf = mrCluster.createJobConf(clusterConf);
   }
 
   private void setUpSchedulerConfigFile(Properties schedulerConfProps)
       throws IOException {
-    Configuration config = new Configuration(false);
-
-    LocalFileSystem fs = FileSystem.getLocal(config);
+    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
 
     String myResourcePath = System.getProperty("test.build.data");
     Path schedulerConfigFilePath =
         new Path(myResourcePath, CapacitySchedulerConf.SCHEDULER_CONF_FILE);
     OutputStream out = fs.create(schedulerConfigFilePath);
 
+    Configuration config = new Configuration(false);
     for (Enumeration<?> e = schedulerConfProps.propertyNames(); e
         .hasMoreElements();) {
       String key = (String) e.nextElement();
@@ -141,9 +137,7 @@ public class ClusterWithCapacityScheduler extends TestCase {
   }
 
   private void cleanUpSchedulerConfigFile() throws IOException {
-    Configuration config = new Configuration(false);
-
-    LocalFileSystem fs = FileSystem.getLocal(config);
+    LocalFileSystem fs = FileSystem.getLocal(new Configuration());
 
     String myResourcePath = System.getProperty("test.build.data");
     Path schedulerConfigFilePath =
@@ -167,9 +161,6 @@ public class ClusterWithCapacityScheduler extends TestCase {
     if (mrCluster != null) {
       mrCluster.shutdown();
     }
-    if (dfsCluster != null) {
-      dfsCluster.shutdown();
-    }
   }
 
   /**
@@ -232,4 +223,4 @@ public class ClusterWithCapacityScheduler extends TestCase {
       return super.findResource(name);
     }
   }
-}
+}
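
The hunks above drop the MiniDFSCluster from the capacity-scheduler test harness and point MiniMRCluster at the local file system instead. A minimal sketch of the resulting setup, using only the calls that appear in the diff (the class name and the task-tracker count of 4 are illustrative, not part of the change):

    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.MiniMRCluster;

    // Sketch: bring up an MR-only mini cluster on the local file system,
    // as the refactored harness now does (no MiniDFSCluster involved).
    public class LocalMiniMRSketch {
      public static void main(String[] args) throws Exception {
        JobConf clusterConf = new JobConf();
        // The harness additionally sets "mapred.jobtracker.taskScheduler"
        // to CapacityTaskScheduler before starting the cluster.
        MiniMRCluster mrCluster =
            new MiniMRCluster(4, "file:///", 1, null, null, clusterConf);
        JobConf jobConf = mrCluster.createJobConf(clusterConf);
        // ... submit test jobs against jobConf ...
        mrCluster.shutdown();
      }
    }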

+ 4 - 4
src/contrib/capacity-scheduler/src/test/org/apache/hadoop/mapred/TestQueueCapacities.java

@@ -51,7 +51,7 @@ public class TestQueueCapacities extends ClusterWithCapacityScheduler {
     clusterProps.put("mapred.tasktracker.reduce.tasks.maximum", String
         .valueOf(3));
     // cluster capacity 12 maps, 12 reduces
-    startCluster(4, 2, clusterProps, schedulerProps);
+    startCluster(4, clusterProps, schedulerProps);
 
     ControlledMapReduceJobRunner jobRunner =
         ControlledMapReduceJobRunner.getControlledMapReduceJobRunner(
@@ -126,7 +126,7 @@ public class TestQueueCapacities extends ClusterWithCapacityScheduler {
     clusterProps.put("mapred.tasktracker.reduce.tasks.maximum", String
         .valueOf(0));
     // cluster capacity 12 maps, 0 reduces
-    startCluster(4, 2, clusterProps, schedulerProps);
+    startCluster(4, clusterProps, schedulerProps);
 
     singleQMultipleJobs1();
     singleQMultipleJobs2();
@@ -166,7 +166,7 @@ public class TestQueueCapacities extends ClusterWithCapacityScheduler {
 
     // cluster capacity 10 maps, 10 reduces and 4 queues with capacities 1, 2,
     // 3, 4 respectively.
-    startCluster(5, 2, clusterProps, schedulerProps);
+    startCluster(5, clusterProps, schedulerProps);
 
     multipleQsWithOneQBeyondCapacity(queues);
     multipleQueuesWithinCapacities(queues);
@@ -437,4 +437,4 @@ public class TestQueueCapacities extends ClusterWithCapacityScheduler {
       numTasks += 1;
     }
   }
-}
+}
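
With the DataNode argument gone, callers pass only the task-tracker count, as the TestQueueCapacities hunks above show. A hedged sketch of a test subclass using the refactored harness (the class name, test name, and slot counts are illustrative; the configuration keys are standard Hadoop 0.20 task-tracker settings, and the harness shuts the MiniMRCluster down itself, as the shutdown hunk above shows):

    import java.util.Properties;
    import org.apache.hadoop.mapred.ClusterWithCapacityScheduler;

    // Illustrative subclass: only the number of task trackers is passed to
    // startCluster(); no DataNode count is needed any more.
    public class ExampleQueueCapacityTest extends ClusterWithCapacityScheduler {
      public void testExample() throws Exception {
        Properties clusterProps = new Properties();
        clusterProps.put("mapred.tasktracker.map.tasks.maximum", String.valueOf(3));
        clusterProps.put("mapred.tasktracker.reduce.tasks.maximum", String.valueOf(3));
        Properties schedulerProps = new Properties();
        // 4 task trackers x 3 slots each = 12 map and 12 reduce slots
        startCluster(4, clusterProps, schedulerProps);
        // ... run controlled jobs here ...
      }
    }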