MAPREDUCE-4681. Fix unit tests broken by HDFS-3910. Contributed by Arun C. Murthy.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1392075 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy, 12 years ago
commit 57807d50bf
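
Every hunk below follows the same pattern: HDFS-3910 widened the checked exceptions thrown somewhere along the code paths these tests exercise, so test methods and helpers that declared only "throws IOException" no longer compile, and the fix is to widen each declaration to "throws Exception". The following is a minimal, self-contained sketch of that failure mode; the helper name waitForCondition and the specific exceptions it throws are illustrative assumptions, not taken from HDFS-3910 or from this patch.

import java.io.IOException;
import java.util.concurrent.TimeoutException;

import junit.framework.TestCase;

// Minimal illustration (not part of the patch): a helper that once threw only
// IOException now also throws other checked exceptions, so every caller that
// declared "throws IOException" fails to compile until its clause is widened.
public class ThrowsClauseSketch extends TestCase {

  // Hypothetical stand-in for a utility whose signature gained new checked
  // exceptions, analogous to what HDFS-3910 is assumed to have introduced.
  static void waitForCondition()
      throws IOException, InterruptedException, TimeoutException {
    // pretend to poll cluster state until a deadline expires
  }

  // Declaring "throws IOException" here would no longer compile; widening to
  // "throws Exception" (as the hunks below do) accepts any checked exception.
  public void testSomething() throws Exception {
    waitForCondition();
  }
}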

hadoop-mapreduce-project/CHANGES.txt (+2, -0)

@@ -156,6 +156,8 @@ Release 2.0.3-alpha - Unreleased
     MAPREDUCE-4674. Hadoop examples secondarysort has a typo
     "secondarysrot" in the usage. (Robert Justice via eli)
 
+    MAPREDUCE-4681. Fix unit tests broken by HDFS-3910. (acmurthy) 
+
 Release 2.0.2-alpha - 2012-09-07 
 
   INCOMPATIBLE CHANGES

hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestJobQueueInformation.java (+1, -1)

@@ -98,7 +98,7 @@ public class TestJobQueueInformation extends TestCase {
     dfsCluster.shutdown();
   }
 
-  public void testJobQueues() throws IOException {
+  public void testJobQueues() throws Exception {
     JobClient jc = new JobClient(mrCluster.createJobConf());
     String expectedQueueInfo = "Maximum Tasks Per Job :: 10";
     JobQueueInfo[] queueInfos = jc.getQueues();

hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestSetupAndCleanupFailure.java (+2, -2)

@@ -149,7 +149,7 @@ public class TestSetupAndCleanupFailure extends TestCase {
   private void testSetupAndCleanupKill(MiniMRCluster mr, 
                                        MiniDFSCluster dfs, 
                                        boolean commandLineKill) 
-  throws IOException {
+  throws Exception {
     // launch job with waiting setup/cleanup
     RunningJob job = launchJobWithWaitingSetupAndCleanup(mr);
     
@@ -223,7 +223,7 @@ public class TestSetupAndCleanupFailure extends TestCase {
   // Also Tests the command-line kill for setup/cleanup attempts. 
   // tests the setup/cleanup attempts getting killed if 
   // they were running on a lost tracker
-  public void testWithDFS() throws IOException {
+  public void testWithDFS() throws Exception {
     MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
     FileSystem fileSys = null;

hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/UtilsForTests.java (+3, -3)

@@ -449,7 +449,7 @@ public class UtilsForTests {
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, 
                           String mapSignalFile, 
                           String reduceSignalFile, int replication) 
-  throws IOException {
+  throws Exception {
     writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(mapSignalFile), 
               (short)replication);
     writeFile(dfs.getNameNode(), fileSys.getConf(), new Path(reduceSignalFile), 
@@ -462,7 +462,7 @@ public class UtilsForTests {
   static void signalTasks(MiniDFSCluster dfs, FileSystem fileSys, 
                           boolean isMap, String mapSignalFile, 
                           String reduceSignalFile)
-  throws IOException {
+  throws Exception {
     //  signal the maps to complete
     writeFile(dfs.getNameNode(), fileSys.getConf(),
               isMap 
@@ -483,7 +483,7 @@ public class UtilsForTests {
   }
   
   static void writeFile(NameNode namenode, Configuration conf, Path name, 
-      short replication) throws IOException {
+      short replication) throws Exception {
     FileSystem fileSys = FileSystem.get(conf);
     SequenceFile.Writer writer = 
       SequenceFile.createWriter(fileSys, conf, name,
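
The UtilsForTests hunks show why the test-level signatures had to change as well: once the leaf helper writeFile can throw more than IOException, signalTasks must widen its clause, and so must the test methods that call it. The sketch below mirrors that cascade with simplified, hypothetical signatures; the exact exceptions introduced by HDFS-3910 are assumed, not quoted from it.

import java.io.IOException;
import java.util.concurrent.TimeoutException;

// Sketch of how the widened throws clause propagates up the call chain in this
// diff. Method names mirror the patch, but bodies are placeholders and the
// TimeoutException on writeFile is an assumption about what HDFS-3910 added.
public class ThrowsCascadeSketch {

  // Leaf helper: once it can throw more than IOException, its declaration
  // must widen (the UtilsForTests.writeFile change above).
  static void writeFile(String name, short replication)
      throws IOException, InterruptedException, TimeoutException {
    // ... create the file, then wait for it to reach the target replication
  }

  // Mid-level helper: simply forwards whatever writeFile throws
  // (the UtilsForTests.signalTasks changes above).
  static void signalTasks(String mapSignalFile, String reduceSignalFile,
                          int replication) throws Exception {
    writeFile(mapSignalFile, (short) replication);
    writeFile(reduceSignalFile, (short) replication);
  }

  // Top-level test methods end up declaring "throws Exception" too
  // (TestJobQueueInformation and TestSetupAndCleanupFailure above).
  public void testWithDFS() throws Exception {
    signalTasks("map-signal", "reduce-signal", 1);
  }
}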