Pārlūkot izejas kodu

HADOOP-482. Fix unit tests to work when a cluster is running on the same machine. Contributed by Wendy.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@469642 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 18 gadi atpakaļ
vecāks
revīzija
440443c4b7

+ 3 - 0
CHANGES.txt

@@ -91,6 +91,9 @@ Trunk (unreleased changes)
     extend the cache to permit symbolic links to cached items, rather
     than local file copies.  (Mahadev Konar via cutting)
 
+25. HADOOP-482.  Fix unit tests to work when a cluster is running on
+    the same machine, removing port conflicts.  (Wendy Chien via cutting)
+
 
 Release 0.7.2 - 2006-10-18
 

+ 0 - 3
build.xml

@@ -29,7 +29,6 @@
   <property name="build.examples" value="${build.dir}/examples"/>
   <property name="build.libhdfs" value="${build.dir}/libhdfs"/>
   <property name="build.docs" value="${build.dir}/docs"/>
-  <property name="build.minimr" value="${build.dir}/minimr"/>
   <property name="build.javadoc" value="${build.docs}/api"/>
   <property name="build.encoding" value="ISO-8859-1"/>
 
@@ -74,7 +73,6 @@
   <!-- the unit test classpath: uses test.src.dir for configuration -->
   <path id="test.classpath">
     <pathelement location="${test.build.classes}" />
-    <pathelement location="${build.minimr}" />
    <pathelement location="${test.src.dir}"/>
    <pathelement location="${build.dir}"/>
    <pathelement location="${build.examples}"/>
@@ -101,7 +99,6 @@
    <mkdir dir="${build.webapps}/dfs/WEB-INF"/>
    <mkdir dir="${build.webapps}/datanode/WEB-INF"/>
    <mkdir dir="${build.examples}"/>
-    <mkdir dir="${build.minimr}"/>
 
    <mkdir dir="${test.build.dir}"/>
    <mkdir dir="${test.build.classes}"/>

+ 4 - 4
src/java/org/apache/hadoop/mapred/JobTracker.java

@@ -87,11 +87,11 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
     }
 
     public static void stopTracker() throws IOException {
-      if (tracker == null)
-        throw new IOException("Trying to stop JobTracker that is not running.");
       runTracker = false;
-      tracker.close();
-      tracker = null;
+      if (tracker != null) {
+        tracker.close();
+        tracker = null;
+      }
     }
     
     public long getProtocolVersion(String protocol, long clientVersion) {

+ 4 - 6
src/test/hadoop-site.xml

@@ -7,14 +7,12 @@
 
 <configuration>
 
-<property>
-  <name>mapred.local.dir</name>
-  <value>build/test/mapred/local</value>
-</property>
 
 <property>
-  <name>mapred.system.dir</name>
-  <value>build/test/mapred/system</value>
+  <name>hadoop.tmp.dir</name>
+  <value>build/test</value>
+  <description>A base for other temporary directories.</description>
 </property>
 
+
 </configuration>

+ 72 - 22
src/test/org/apache/hadoop/dfs/MiniDFSCluster.java

@@ -34,6 +34,12 @@ public class MiniDFSCluster {
  private Thread dataNodeThread;
  private NameNodeRunner nameNode;
  private DataNodeRunner dataNode;
+  private int maxRetries = 10;
+  private int MAX_RETRIES  = 10;
+  private int MAX_RETRIES_PER_PORT = 10;
+
+  private int nameNodePort = 0;
+  private int nameNodeInfoPort = 0;
 
   /**
    * An inner class that runs a name node.
@@ -111,17 +117,22 @@ public class MiniDFSCluster {
       }
     }
   }
-  
+
   /**
-   * Create the config and start up the servers.
+   * Create the config and start up the servers.  If either the rpc or info port is already 
+   * in use, we will try new ports.
+   * @param namenodePort suggestion for which rpc port to use.  caller should use 
+   *                     getNameNodePort() to get the actual port used.   
    * @param dataNodeFirst should the datanode be brought up before the namenode?
    */
   public MiniDFSCluster(int namenodePort, 
                         Configuration conf,
                         boolean dataNodeFirst) throws IOException {
+
     this.conf = conf;
-    conf.set("fs.default.name", 
-             "localhost:"+ Integer.toString(namenodePort));
+
+    this.nameNodePort = namenodePort;
+    this.nameNodeInfoPort = 50080;   // We just want this port to be different from the default. 
     File base_dir = new File(System.getProperty("test.build.data"),
                              "dfs/");
     conf.set("dfs.name.dir", new File(base_dir, "name").getPath());
@@ -131,27 +142,66 @@ public class MiniDFSCluster {
     // this timeout seems to control the minimum time for the test, so
     // decrease it considerably.
     conf.setInt("ipc.client.timeout", 1000);
-    NameNode.format(conf);
-    nameNode = new NameNodeRunner();
-    nameNodeThread = new Thread(nameNode);
-    dataNode = new DataNodeRunner();
-    dataNodeThread = new Thread(dataNode);
-    if (dataNodeFirst) {
-      dataNodeThread.start();      
-      nameNodeThread.start();      
-    } else {
-      nameNodeThread.start();
-      dataNodeThread.start();      
-    }
-    while (!nameNode.isUp()) {
-      try {                                     // let daemons get started
-        System.out.println("waiting for dfs minicluster to start");
-        Thread.sleep(1000);
-      } catch(InterruptedException e) {
+
+    // Loops until we find ports that work or we give up because 
+    // too many tries have failed.
+    boolean foundPorts = false;
+    int portsTried = 0;
+    while ((!foundPorts) && (portsTried < MAX_RETRIES)) {
+      conf.set("fs.default.name", 
+               "localhost:"+ Integer.toString(nameNodePort));
+      conf.set("dfs.info.port", nameNodeInfoPort);
+      
+      NameNode.format(conf);
+      nameNode = new NameNodeRunner();
+      nameNodeThread = new Thread(nameNode);
+      dataNode = new DataNodeRunner();
+      dataNodeThread = new Thread(dataNode);
+      if (dataNodeFirst) {
+        dataNodeThread.start();      
+        nameNodeThread.start();      
+      } else {
+        nameNodeThread.start();
+        dataNodeThread.start();      
       }
+
+      int retry = 0;
+      while (!nameNode.isUp() && (retry < MAX_RETRIES_PER_PORT)) {
+        try {                                     // let daemons get started
+          System.out.println("waiting for dfs minicluster to start");
+          Thread.sleep(1000);
+        } catch(InterruptedException e) {
+        }
+        retry++;
+      }
+      if (retry >= MAX_RETRIES_PER_PORT) {
+        this.nameNodePort += 3;
+        this.nameNodeInfoPort += 7;
+        System.out.println("Failed to start DFS minicluster in " + retry + " attempts.  Trying new ports:");
+        System.out.println("\tNameNode RPC port: " + nameNodePort);
+        System.out.println("\tNameNode info port: " + nameNodeInfoPort);
+
+        nameNode.shutdown();
+        dataNode.shutdown();
+        
+      } else {
+        foundPorts = true;
+      }
+      portsTried++;
+    } 
+    if (portsTried >= MAX_RETRIES) {
+        throw new IOException("Failed to start a DFS minicluster after trying " + portsTried + " ports.");
     }
   }
-  
+
+  /**
+   * Returns the rpc port used by the mini cluster, because the caller supplied port is 
+   * not necessarily the actual port used.
+   */     
+  public int getNameNodePort() {
+    return nameNodePort;
+  }
+    
   /**
    * Shut down the servers.
    */

+ 68 - 25
src/test/org/apache/hadoop/mapred/MiniMRCluster.java

@@ -32,7 +32,7 @@ public class MiniMRCluster {
     
     private int jobTrackerPort = 0;
     private int taskTrackerPort = 0;
-    
+    private int jobTrackerInfoPort = 0;
     private int numTaskTrackers;
     
     private List taskTrackerList = new ArrayList();
@@ -40,10 +40,17 @@ public class MiniMRCluster {
     
     private String namenode;
     
+    private int MAX_RETRIES_PER_PORT = 10;
+    private int MAX_RETRIES = 10;
+
     /**
      * An inner class that runs a job tracker.
      */
     class JobTrackerRunner implements Runnable {
+
+        public boolean isUp() {
+            return (JobTracker.getTracker() != null);
+        }
         /**
          * Create the job tracker and run it.
          */
@@ -52,6 +59,7 @@ public class MiniMRCluster {
                JobConf jc = new JobConf();
                jc.set("fs.name.node", namenode);
                jc.set("mapred.job.tracker", "localhost:"+jobTrackerPort);
+                jc.set("mapred.job.tracker.info.port", jobTrackerInfoPort);
                // this timeout seems to control the minimum time for the test, so
                // set it down at 2 seconds.
                jc.setInt("ipc.client.timeout", 1000);
@@ -194,9 +202,18 @@ public class MiniMRCluster {
         }
       }
     }
-    
+
+    /** 
+     * Get the actual rpc port used.
+     */
+    public int getJobTrackerPort() {
+        return jobTrackerPort;
+    }
+
     /**
-     * Create the config and start up the servers.
+     * Create the config and start up the servers.  The ports supplied by the user are
+     * just used as suggestions.  If those ports are already in use, new ports
+     * are tried.  The caller should call getJobTrackerPort to get the actual rpc port used.
      */
     public MiniMRCluster(int jobTrackerPort,
                          int taskTrackerPort,
@@ -211,39 +228,65 @@ public class MiniMRCluster {
            int numTaskTrackers,
            String namenode,
            boolean taskTrackerFirst, int numDir) throws IOException {
+        
        this.jobTrackerPort = jobTrackerPort;
        this.taskTrackerPort = taskTrackerPort;
+        this.jobTrackerInfoPort = 50030;
        this.numTaskTrackers = numTaskTrackers;
        this.namenode = namenode;
-        
-        File configDir = new File("build", "minimr");
-        configDir.mkdirs();
-        File siteFile = new File(configDir, "hadoop-site.xml");
-        PrintWriter pw = new PrintWriter(siteFile);
-        pw.print("<?xml version=\"1.0\"?>\n"+
-                "<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n"+
-                "<configuration>\n"+
-                " <property>\n"+
-                "   <name>mapred.system.dir</name>\n"+
-                "   <value>build/test/mapred/system</value>\n"+
-                " </property>\n"+
-                "</configuration>\n");
-        pw.close();
-        jobTracker = new JobTrackerRunner();
-        jobTrackerThread = new Thread(jobTracker);
-        if (!taskTrackerFirst) {
-          jobTrackerThread.start();
-        }
-        for (int idx = 0; idx < numTaskTrackers; idx++) {
+
+        // Loop until we find a set of ports that are all unused or until we
+        // give up because it's taken too many tries.
+        boolean foundPorts = false;
+        int portsTried = 0;
+        while ((!foundPorts) && (portsTried < MAX_RETRIES)) {
+          jobTracker = new JobTrackerRunner();
+          jobTrackerThread = new Thread(jobTracker);
+          if (!taskTrackerFirst) {
+            jobTrackerThread.start();
+          }
+          for (int idx = 0; idx < numTaskTrackers; idx++) {
            TaskTrackerRunner taskTracker = new TaskTrackerRunner(numDir);
            Thread taskTrackerThread = new Thread(taskTracker);
            taskTrackerThread.start();
            taskTrackerList.add(taskTracker);
            taskTrackerThreadList.add(taskTrackerThread);
+          }
+          if (taskTrackerFirst) {
+            jobTrackerThread.start();
+          }
+          int retry = 0;
+          while (!jobTracker.isUp() && (retry < MAX_RETRIES_PER_PORT)) {
+            try {                                     // let daemons get started
+              System.err.println("waiting for jobtracker to start");
+              Thread.sleep(1000);
+            } catch(InterruptedException e) {
+            }
+            retry++;
+          }
+          if (retry >= MAX_RETRIES_PER_PORT) {
+              // Try new ports.
+              this.jobTrackerPort += 7;
+              this.jobTrackerInfoPort += 3;
+              this.taskTrackerPort++;
+
+              System.err.println("Failed to start MR minicluster in " + retry + 
+                                 " attempts.  Retrying with new ports:");
+              System.err.println("\tJobTracker RPC port = " + jobTrackerPort);
+              System.err.println("\tJobTracker info port = " + jobTrackerInfoPort);
+              System.err.println("\tTaskTracker RPC port(s) = " + 
+                                 taskTrackerPort + "-" + (taskTrackerPort+numTaskTrackers-1));
+              shutdown();
+              taskTrackerList.clear();
+          } else {
+            foundPorts = true;
+          }
+          portsTried++;
        }
-        if (taskTrackerFirst) {
-          jobTrackerThread.start();
+        if (portsTried >= MAX_RETRIES) {
+            throw new IOException("Failed to start MR minicluster after trying " + portsTried + " ports.");
        }
+        
        waitUntilIdle();
    }
    

+ 2 - 3
src/test/org/apache/hadoop/mapred/TestEmptyJobWithDFS.java

@@ -101,15 +101,14 @@ public class TestEmptyJobWithDFS extends TestCase {
      FileSystem fileSys = null;
      try {
          final int taskTrackers = 4;
-          final int jobTrackerPort = 50050;
-          final String jobTrackerName = "localhost:" + jobTrackerPort;
+          final int jobTrackerPort = 60050;
          Configuration conf = new Configuration();
          dfs = new MiniDFSCluster(65315, conf, true);
          fileSys = dfs.getFileSystem();
          namenode = fileSys.getName();
          mr = new MiniMRCluster(jobTrackerPort, 50060, taskTrackers, 
                                 namenode, true, 2);
-
+          final String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
          JobConf jobConf = new JobConf();
          boolean result;
          result = launchEmptyJob(namenode, jobTrackerName, jobConf, 

+ 3 - 2
src/test/org/apache/hadoop/mapred/TestMiniMRClasspath.java

@@ -96,8 +96,8 @@ public class TestMiniMRClasspath extends TestCase {
      FileSystem fileSys = null;
      try {
          final int taskTrackers = 4;
-          final int jobTrackerPort = 50050;
-          final String jobTrackerName = "localhost:" + jobTrackerPort;
+          final int jobTrackerPort = 60050;
+
          Configuration conf = new Configuration();
          dfs = new MiniDFSCluster(65314, conf, true);
          fileSys = dfs.getFileSystem();
@@ -106,6 +106,7 @@ public class TestMiniMRClasspath extends TestCase {
                                 namenode, true, 3);
          JobConf jobConf = new JobConf();
          String result;
+          final String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
          result = launchWordCount(namenode, jobTrackerName, jobConf, 
                                   "The quick brown fox\nhas many silly\n" + 
                                   "red fox sox\n",

+ 2 - 2
src/test/org/apache/hadoop/mapred/TestMiniMRDFSCaching.java

@@ -44,9 +44,9 @@ public class TestMiniMRDFSCaching extends TestCase {
      dfs = new MiniDFSCluster(65314, conf, true);
      fileSys = dfs.getFileSystem();
      namenode = fileSys.getName();
-      mr = new MiniMRCluster(50050, 50060, 2, namenode, true, 4);
+      mr = new MiniMRCluster(60050, 50060, 2, namenode, true, 4);
      // run the wordcount example with caching
-      boolean ret = MRCaching.launchMRCache("localhost:50050",
+      boolean ret = MRCaching.launchMRCache("localhost:"+mr.getJobTrackerPort(),
                                            "/testing/wc/input",
                                            "/testing/wc/output", namenode,
                                            conf,

+ 3 - 2
src/test/org/apache/hadoop/mapred/TestMiniMRLocalFS.java

@@ -35,12 +35,13 @@ public class TestMiniMRLocalFS extends TestCase {
      MiniMRCluster mr = null;
      try {
          mr = new MiniMRCluster(60030, 60040, 2, "local", false, 3);
-          double estimate = PiEstimator.launch(NUM_MAPS, NUM_SAMPLES, "localhost:60030", "local");
+          String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
+          double estimate = PiEstimator.launch(NUM_MAPS, NUM_SAMPLES, jobTrackerName, "local");
          double error = Math.abs(Math.PI - estimate);
          assertTrue("Error in PI estimation "+error+" exceeds 0.01", (error < 0.01));
          JobConf jconf = new JobConf();
          // run the wordcount example with caching
-          boolean ret = MRCaching.launchMRCache("localhost:60030", "/tmp/wc/input",
+          boolean ret = MRCaching.launchMRCache(jobTrackerName, "/tmp/wc/input",
                                                "/tmp/wc/output", "local", jconf,
                                                "The quick brown fox\nhas many silly\n"
                                                    + "red fox sox\n");

+ 3 - 2
src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java

@@ -145,14 +145,15 @@ public class TestMiniMRWithDFS extends TestCase {
      FileSystem fileSys = null;
      try {
          final int taskTrackers = 4;
-          final int jobTrackerPort = 50050;
-          final String jobTrackerName = "localhost:" + jobTrackerPort;
+          final int jobTrackerPort = 60050;
+
          Configuration conf = new Configuration();
          dfs = new MiniDFSCluster(65314, conf, true);
          fileSys = dfs.getFileSystem();
          namenode = fileSys.getName();
          mr = new MiniMRCluster(jobTrackerPort, 50060, taskTrackers, 
                                 namenode, true);
+          final String jobTrackerName = "localhost:" + mr.getJobTrackerPort();
          double estimate = PiEstimator.launch(NUM_MAPS, NUM_SAMPLES, 
                                               jobTrackerName, namenode);
          double error = Math.abs(Math.PI - estimate);