
HADOOP-1629. Added an upgrade test for HADOOP-1134. Contributed by Raghu.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@565591 13f79535-47bb-0310-9956-ffa450edef68
Nigel Daley 18 years ago
parent
commit
2789abe6f4
4 changed files with 52 additions and 3 deletions
  1. CHANGES.txt (+5, -0)
  2. build.xml (+3, -0)
  3. src/java/org/apache/hadoop/fs/Command.java (+40, -0)
  4. src/test/org/apache/hadoop/dfs/MiniDFSCluster.java (+4, -3)

+ 5 - 0
CHANGES.txt

@@ -515,6 +515,11 @@ Branch 0.14 (unreleased changes)
 150. HADOOP-1568.  Expose HDFS as xml/http filesystem to provide cross-version
     compatibility. (Chris Douglas via omalley)
 
+151. HADOOP-1668.  Added an INCOMPATIBILITY section to CHANGES.txt. (nigel)
+
+152. HADOOP-1629.  Added an upgrade test for HADOOP-1134.
+     (Raghu Angadi via nigel)
+
 Release 0.13.0 - 2007-06-08
 
  1. HADOOP-1047.  Fix TestReplication to succeed more reliably.

+ 3 - 0
build.xml

@@ -448,6 +448,8 @@
     <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.txt" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.jar" todir="${test.cache.data}"/>
     <copy file="${test.src.dir}/org/apache/hadoop/mapred/test.zip" todir="${test.cache.data}"/>
+    <copy file="${test.src.dir}/org/apache/hadoop/dfs/hadoop-12-dfs-dir.tgz" todir="${test.cache.data}"/>
+    <copy file="${test.src.dir}/org/apache/hadoop/dfs/hadoop-12-dfs-dir.txt" todir="${test.cache.data}"/>
   </target>
 
   <!-- ================================================================== -->
@@ -463,6 +465,7 @@
            fork="yes" maxmemory="256m" dir="${basedir}" timeout="${test.timeout}"
       errorProperty="tests.failed" failureProperty="tests.failed">
       <sysproperty key="test.build.data" value="${test.build.data}"/>
+      <sysproperty key="test.cache.data" value="${test.cache.data}"/>    	
       <sysproperty key="hadoop.log.dir" value="${test.log.dir}"/>
       <sysproperty key="test.src.dir" value="${test.src.dir}"/>
       <sysproperty key="java.library.path"

+ 40 - 0
src/java/org/apache/hadoop/fs/Command.java

@@ -49,4 +49,44 @@ abstract public class Command {
   /** Parse the execution result */
   protected abstract void parseExecResult(BufferedReader lines)
   throws IOException;
+
+  /// A simple implementation of Command
+  private static class SimpleCommandExecutor extends Command {
+    
+    private String[] command;
+    private StringBuffer reply;
+    
+    SimpleCommandExecutor(String[] execString) {
+      command = execString;
+    }
+
+    @Override
+    protected String[] getExecString() {
+      return command;
+    }
+
+    @Override
+    protected void parseExecResult(BufferedReader lines) throws IOException {
+      reply = new StringBuffer();
+      char[] buf = new char[512];
+      int nRead;
+      while ( (nRead = lines.read(buf, 0, buf.length)) > 0 ) {
+        reply.append(buf, 0, nRead);
+      }
+    }
+    
+    String getReply() {
+      return (reply == null) ? "" : reply.toString();
+    }
+  }
+  
+  /** 
+   * Static method to execute a command. Covers most of the simple cases
+   * without requiring the user to implement the Command interface.
+   */
+  public static String execCommand(String[] cmd) throws IOException {
+    SimpleCommandExecutor exec = new SimpleCommandExecutor(cmd);
+    exec.run();
+    return exec.getReply();
   }
+}
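
The added execCommand helper wraps the fork/exec/parse cycle behind a single static call, so callers no longer need to subclass Command for simple run-and-capture cases. A usage sketch; the calling class and the command run are illustrative:

import java.io.IOException;
import org.apache.hadoop.fs.Command;

public class ExecCommandExample {
  public static void main(String[] args) throws IOException {
    // Forks the process, waits for it to finish, and returns the
    // captured stdout as a single String.
    String output = Command.execCommand(new String[] {"uname", "-a"});
    System.out.print(output);
  }
}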

+ 4 - 3
src/test/org/apache/hadoop/dfs/MiniDFSCluster.java

@@ -178,8 +178,9 @@ public class MiniDFSCluster {
     String[] args = (operation == null ||
                      operation == StartupOption.FORMAT ||
                      operation == StartupOption.REGULAR) ?
-      new String[] {} : new String[] {"-"+operation.toString()};
-        
+                    null : new String[] {"-"+operation.toString()};
+    String [] dnArgs = (operation == StartupOption.UPGRADE) ? null : args;
+    
     for (int i = 0; i < numDataNodes; i++) {
       Configuration dnConf = new Configuration(conf);
       if (manageDfsDirs) {
@@ -198,7 +199,7 @@ public class MiniDFSCluster {
       }
       System.out.println("Starting DataNode " + i + " with dfs.data.dir: " 
                          + dnConf.get("dfs.data.dir"));
-      dataNodes.add(DataNode.createDataNode(args, dnConf));
+      dataNodes.add(DataNode.createDataNode(dnArgs, dnConf));
     }
   }
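
The rewritten argument selection passes null (rather than an empty array) for normal startup, and withholds the upgrade flag from DataNodes, since in this cluster setup only the NameNode drives the upgrade. A standalone sketch of that selection logic; the StartupOption enum below is a stand-in for the real dfs class and may format the flag text differently:

public class DataNodeArgsDemo {
  // Stand-in for the real StartupOption in org.apache.hadoop.dfs;
  // the actual enum may render its command-line flag differently.
  enum StartupOption { FORMAT, REGULAR, UPGRADE, ROLLBACK }

  static String[] dataNodeArgs(StartupOption operation) {
    String[] args = (operation == null ||
                     operation == StartupOption.FORMAT ||
                     operation == StartupOption.REGULAR) ?
                    null : new String[] {"-" + operation};
    // DataNodes never receive the upgrade flag; other args pass through.
    return (operation == StartupOption.UPGRADE) ? null : args;
  }

  public static void main(String[] args) {
    System.out.println(java.util.Arrays.toString(dataNodeArgs(StartupOption.ROLLBACK)));
    System.out.println(java.util.Arrays.toString(dataNodeArgs(StartupOption.UPGRADE)));
  }
}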