
HADOOP-368. Improvements to DistributedFSCheck and TestDFSIO. Contributed by Konstantin.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@423054 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting, 19 years ago
commit d2fcbca290

+ 15 - 10
src/test/org/apache/hadoop/fs/DistributedFSCheck.java

@@ -81,10 +81,11 @@ public class DistributedFSCheck extends TestCase {
   public void testFSBlocks( String rootName ) throws Exception {
     createInputFile(rootName);
     runDistributedFSCheck();
+    cleanup();  // clean up after all to restore the system state
   }
 
   private void createInputFile( String rootName ) throws IOException {
-    fs.delete(MAP_INPUT_DIR);
+    cleanup();  // clean up if previous run failed
 
     Path inputFile = new Path(MAP_INPUT_DIR, "in_file");
     SequenceFile.Writer writer =
@@ -133,18 +134,22 @@ public class DistributedFSCheck extends TestCase {
                         long offset 
                         ) throws IOException {
       // open file
-      DataInputStream in;
-      in = new DataInputStream(fs.open(new Path(name)));
+      FSDataInputStream in = null;
+      try {
+        in = fs.open(new Path(name));
+      } catch( IOException e ) {
+        return name + "@(missing)";
+      }
+      in.seek( offset );
       long actualSize = 0;
       try {
         long blockSize = fs.getDefaultBlockSize();
-        int curSize = bufferSize;
-        for(  actualSize = 0; 
+        reporter.setStatus( "reading " + name + "@" + 
+            offset + "/" + blockSize );
+        for(  int curSize = bufferSize; 
               curSize == bufferSize && actualSize < blockSize;
               actualSize += curSize) {
-          curSize = in.read( buffer, (int)offset, Math.min(bufferSize, (int)(blockSize - actualSize)) );
-          reporter.setStatus( "reading " + name + "@" + 
-                              offset + "/" + blockSize );
+          curSize = in.read( buffer, 0, bufferSize );
         }
       } catch( IOException e ) {
         LOG.info( "Corrupted block detected in \"" + name + "\" at " + offset );
@@ -178,7 +183,6 @@ public class DistributedFSCheck extends TestCase {
   }
   
   private void runDistributedFSCheck() throws Exception {
-    fs.delete(READ_DIR);
     JobConf job = new JobConf( fs.getConf(), DistributedFSCheck.class );
 
     job.setInputPath(MAP_INPUT_DIR);
@@ -240,6 +244,7 @@ public class DistributedFSCheck extends TestCase {
     long execTime = System.currentTimeMillis() - tStart;
     
     test.analyzeResult( execTime, resFileName, viewStats );
+    // test.cleanup();  // clean up after all to restore the system state
   }
   
   private void analyzeResult( long execTime,
@@ -318,7 +323,7 @@ public class DistributedFSCheck extends TestCase {
     }
   }
 
-  private void cleanup() throws Exception {
+  private void cleanup() throws IOException {
     LOG.info( "Cleaning up test files" );
     fs.delete(TEST_ROOT_DIR);
   }
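For reference, a minimal sketch of the per-block read this hunk moves toward: open the file (treating a failed open as a missing block), seek to the block offset, and read in bufferSize chunks until the block is exhausted or the stream runs dry. The sketch below uses plain java.io rather than Hadoop's FSDataInputStream, and the class and method names are illustrative, not taken from the patch.

import java.io.IOException;
import java.io.RandomAccessFile;

public class BlockReadSketch {

  // Illustrative stand-in for the mapper's per-block read: returns the number
  // of bytes actually read from one block, or -1 if the file could not be
  // opened (the "missing" case in the patch).
  static long readBlock(String name, long offset, long blockSize, int bufferSize) {
    byte[] buffer = new byte[bufferSize];
    RandomAccessFile in;
    try {
      in = new RandomAccessFile(name, "r");
    } catch (IOException e) {
      return -1;                        // file missing or unreadable
    }
    long actualSize = 0;
    try {
      in.seek(offset);
      // Same loop shape as the patched code: stop when a read returns fewer
      // bytes than requested or the whole block has been covered.
      for (int curSize = bufferSize;
           curSize == bufferSize && actualSize < blockSize;
           actualSize += curSize) {
        curSize = in.read(buffer, 0, bufferSize);
        if (curSize < 0) {
          break;                        // end of file before end of block
        }
      }
    } catch (IOException e) {
      // In DistributedFSCheck this is where a corrupted block is reported.
    } finally {
      try { in.close(); } catch (IOException ignored) { }
    }
    return actualSize;
  }
}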

+ 3 - 3
src/test/org/apache/hadoop/fs/TestDFSIO.java

@@ -99,6 +99,7 @@ public class TestDFSIO extends TestCase {
     createControlFile(fs, fileSize, nrFiles);
     writeTest(fs);
     readTest(fs);
+    cleanup(fs);
   }
 
   private static void createControlFile(
@@ -249,8 +250,7 @@ public class TestDFSIO extends TestCase {
                         ) throws IOException {
       totalSize *= MEGA;
       // open file
-      DataInputStream in;
-      in = new DataInputStream(fs.open(new Path(DATA_DIR, name)));
+      DataInputStream in = fs.open(new Path(DATA_DIR, name));
       try {
         long actualSize = 0;
         for( int curSize = bufferSize; curSize == bufferSize; ) {
@@ -425,7 +425,7 @@ public class TestDFSIO extends TestCase {
     }
   }
 
-  private static void cleanup( FileSystem fs ) throws Exception {
+  private static void cleanup( FileSystem fs ) throws IOException {
     LOG.info( "Cleaning up test files" );
     fs.delete(new Path(TEST_ROOT_DIR));
   }
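Both tests now follow the same housekeeping pattern: clean the test directory before creating input (to recover from a previously failed run) and again after the run (to restore the file system state). A rough stand-alone equivalent on the local file system, with a hypothetical TEST_ROOT_DIR instead of the DFS paths the tests actually use, might look like this:

import java.io.File;
import java.io.IOException;

public class CleanupSketch {

  // Hypothetical local test root; the real tests delete a path on the DFS under test.
  private static final File TEST_ROOT_DIR = new File("/tmp/dfsio_sketch");

  static void runTest() throws IOException {
    cleanup();          // clean up if a previous run failed
    // ... create input, run the write and read jobs, analyze results ...
    cleanup();          // clean up after all to restore the system state
  }

  // Local analogue of fs.delete(TEST_ROOT_DIR): remove the whole test tree.
  static void cleanup() throws IOException {
    deleteRecursively(TEST_ROOT_DIR);
  }

  private static void deleteRecursively(File f) throws IOException {
    if (!f.exists()) {
      return;
    }
    File[] children = f.listFiles();
    if (children != null) {
      for (File child : children) {
        deleteRecursively(child);
      }
    }
    if (!f.delete()) {
      throw new IOException("Failed to delete " + f);
    }
  }
}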