MAPREDUCE-3176. Fixed ant mapreduce tests that were timing out because of a wrong framework name. Contributed by Hitesh Shah.
svn merge -c r1186368 --ignore-ancestry ../../trunk/


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1186369 13f79535-47bb-0310-9956-ffa450edef68

Vinod Kumar Vavilapalli, 13 years ago
parent commit: 3b39b29b9c

+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -1632,6 +1632,9 @@ Release 0.23.0 - Unreleased
     MAPREDUCE-3162. Separated application-init and container-init event types
     in NodeManager's Application state machine. (Todd Lipcon via vinodkv)
 
+    MAPREDUCE-3176. Fixed ant mapreduce tests that are timing out because
+    of wrong framework name. (Hitesh Shah via vinodkv)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 9 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java

@@ -342,9 +342,15 @@ public class ReduceTask extends Task {
     RawKeyValueIterator rIter = null;
     
     boolean isLocal = false; 
-    // local iff framework == local
-    String framework = job.get(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
-    isLocal = framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME);
+    // local if
+    // 1) framework == local or
+    // 2) framework == null and job tracker address == local
+    String framework = job.get(MRConfig.FRAMEWORK_NAME);
+    String masterAddr = job.get(MRConfig.MASTER_ADDRESS, "local");
+    if ((framework == null && masterAddr.equals("local"))
+        || (framework != null && framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME))) {
+      isLocal = true;
+    }
     
     if (!isLocal) {
       Class combinerClass = conf.getCombinerClass();
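
For reference, the new check amounts to the predicate sketched below. This is only an illustrative standalone helper (the class and method names are hypothetical), built from the MRConfig keys that appear in the hunk above.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.MRConfig;

  // Sketch only: mirrors the local-mode decision added to ReduceTask above.
  public class LocalModeCheck {
    // A job is local iff the framework is explicitly "local", or the framework
    // is unset and the job tracker (master) address defaults to "local".
    static boolean isLocal(Configuration job) {
      String framework = job.get(MRConfig.FRAMEWORK_NAME);
      String masterAddr = job.get(MRConfig.MASTER_ADDRESS, "local");
      return (framework == null && masterAddr.equals("local"))
          || MRConfig.LOCAL_FRAMEWORK_NAME.equals(framework);
    }
  }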

+ 28 - 11
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java

@@ -25,6 +25,8 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.ServiceLoader;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -62,6 +64,7 @@ public class Cluster {
   private Path sysDir = null;
   private Path stagingAreaDir = null;
   private Path jobHistoryDir = null;
+  private static final Log LOG = LogFactory.getLog(Cluster.class);
 
   static {
     ConfigUtil.loadResources();
@@ -83,17 +86,31 @@ public class Cluster {
 
     for (ClientProtocolProvider provider : ServiceLoader
         .load(ClientProtocolProvider.class)) {
-      ClientProtocol clientProtocol = null;
-      if (jobTrackAddr == null) {
-        clientProtocol = provider.create(conf);
-      } else {
-        clientProtocol = provider.create(jobTrackAddr, conf);
-      }
-
-      if (clientProtocol != null) {
-        clientProtocolProvider = provider;
-        client = clientProtocol;
-        break;
+      LOG.debug("Trying ClientProtocolProvider : "
+          + provider.getClass().getName());
+      ClientProtocol clientProtocol = null; 
+      try {
+        if (jobTrackAddr == null) {
+          clientProtocol = provider.create(conf);
+        } else {
+          clientProtocol = provider.create(jobTrackAddr, conf);
+        }
+  
+        if (clientProtocol != null) {
+          clientProtocolProvider = provider;
+          client = clientProtocol;
+          LOG.debug("Picked " + provider.getClass().getName()
+              + " as the ClientProtocolProvider");
+          break;
+        }
+        else {
+          LOG.info("Cannot pick " + provider.getClass().getName()
+              + " as the ClientProtocolProvider - returned null protocol");
+        }
+      } 
+      catch (Exception e) {
+        LOG.info("Failed to use " + provider.getClass().getName()
+            + " due to error: " + e.getMessage());
       }
     }
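
The reworked loop tries each ServiceLoader-discovered ClientProtocolProvider in turn, and now logs which provider was tried or picked and catches exceptions from provider.create(), so one failing provider no longer aborts the constructor. A minimal, hedged sketch of the caller side this loop serves (the framework value and class name are only examples; which provider is actually picked depends on what ServiceLoader finds on the classpath):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.mapreduce.Cluster;
  import org.apache.hadoop.mapreduce.MRConfig;

  // Sketch only: constructing a Cluster walks the provider loop shown above.
  public class ClusterInitExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.LOCAL_FRAMEWORK_NAME);
      Cluster cluster = new Cluster(conf);  // iterates the registered providers
      try {
        // ... query or submit jobs through the cluster here ...
      } finally {
        cluster.close();
      }
    }
  }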
 

+ 7 - 1
hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/fs/TestFileSystem.java

@@ -94,7 +94,13 @@ public class TestFileSystem extends TestCase {
     CommandFormat cf;
     cf= new CommandFormat("copyToLocal", 2,2,"crc","ignoreCrc");
     assertEquals(cf.parse(new String[] {"-get","file", "-"}, 1).get(1), "-");
-    assertEquals(cf.parse(new String[] {"-get","file","-ignoreCrc","/foo"}, 1).get(1),"/foo");
+    try {
+      cf.parse(new String[] {"-get","file","-ignoreCrc","/foo"}, 1);
+      fail("Expected parsing to fail as it should stop at first non-option");
+    }
+    catch (Exception e) {
+      // Expected
+    }  
     cf = new CommandFormat("tail", 1, 1, "f");
     assertEquals(cf.parse(new String[] {"-tail","fileName"}, 1).get(0),"fileName");
     assertEquals(cf.parse(new String[] {"-tail","-f","fileName"}, 1).get(0),"fileName");

+ 2 - 0
hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/ipc/TestSocketFactory.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.mapred.JobClient;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapred.MiniMRCluster;
+import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.net.StandardSocketFactory;
 
 /**
@@ -92,6 +93,7 @@ public class TestSocketFactory extends TestCase {
       JobConf jconf = new JobConf(cconf);
       jconf.set("mapred.job.tracker", String.format("localhost:%d",
           jobTrackerPort + 10));
+      jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
       client = new JobClient(jconf);
 
       JobStatus[] jobs = client.jobsToComplete();
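
With provider selection keyed off the framework name, tests that talk to a classic MiniMRCluster through JobClient now set it explicitly; the added line above does exactly that. A condensed, hedged sketch of the same pattern (the class name and job tracker address are placeholders):

  import java.io.IOException;
  import org.apache.hadoop.mapred.JobClient;
  import org.apache.hadoop.mapred.JobConf;
  import org.apache.hadoop.mapreduce.MRConfig;

  // Sketch only: "localhost:9001" stands in for the MiniMRCluster's job tracker
  // address; the essential part is the explicit FRAMEWORK_NAME setting.
  public class ClassicJobClientExample {
    public static void main(String[] args) throws IOException {
      JobConf jconf = new JobConf();
      jconf.set("mapred.job.tracker", "localhost:9001");
      jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
      JobClient client = new JobClient(jconf);  // with classic set, JobClient should pick the JobTracker-based provider
      client.close();
    }
  }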

+ 21 - 0
hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapreduce/TestClientProtocolProviderImpls.java

@@ -96,4 +96,25 @@ public class TestClientProtocolProviderImpls extends TestCase {
     }
   }
 
+  @Test
+  public void testClusterException() {
+
+    Configuration conf = new Configuration();
+    conf.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
+    conf.set(JTConfig.JT_IPC_ADDRESS, "local");
+
+    // initializing a cluster with this conf should throw an error.
+    // However the exception thrown should not be specific to either
+    // the job tracker client provider or the local provider
+    boolean errorThrown = false;
+    try {
+      Cluster cluster = new Cluster(conf);
+      cluster.close();
+      fail("Not expected - cluster init should have failed");
+    } catch (IOException e) {
+      errorThrown = true;
+      assert(e.getMessage().contains("Cannot initialize Cluster. Please check"));
+    }
+    assert(errorThrown);
+  }
 }