
HADOOP-1412. Fix 'dodgy' bugs identified by FindBugs in fs and io packages. Contributed by Hairong.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@546264 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 18 years ago
parent commit 853c1b0736

+ 4 - 1
CHANGES.txt

@@ -84,9 +84,12 @@ Trunk (unreleased changes)
  27. HADOOP-1414.  Fix a number of issues identified by FindBugs as
      "Bad Practice".  (Dhruba Borthakur via cutting)
 
- 28. HADOOP-1392.  Fix "correctness" bugs reported by FindBugs in
+ 28. HADOOP-1392.  Fix "correctness" bugs identified by FindBugs in
      fs and dfs packages.  (Raghu Angadi via cutting)
 
+ 29. HADOOP-1412.  Fix "dodgy" bugs identified by FindBugs in fs and
+     io packages.  (Hairong Kuang via cutting)
+
 
 Release 0.13.0 - 2007-06-08
 

+ 2 - 4
src/java/org/apache/hadoop/filecache/DistributedCache.java

@@ -327,9 +327,7 @@ public class DistributedCache {
     } else {
       FSDataInputStream fsStream = fileSystem.open(md5File);
       digest = new byte[md5.getDigestLength()];
-      // assuming reading 16 bytes once is not a problem
-      // though it should be checked if 16 bytes have been read or not
-      int read = fsStream.read(digest);
+      fsStream.readFully(digest);
       fsStream.close();
     }
 
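The FindBugs complaint here is that InputStream.read(byte[]) is allowed to return after filling only part of the buffer, so the MD5 digest could have been read partially without any error. readFully(), inherited from java.io.DataInputStream, keeps reading until the whole buffer is filled or throws EOFException. A minimal standalone sketch of the difference, using plain java.io rather than the Hadoop classes above; TricklingStream is a made-up wrapper that forces short reads:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

public class ReadVsReadFully {
  /** Wraps a stream so each read() call hands back at most 4 bytes, like a slow socket. */
  static class TricklingStream extends FilterInputStream {
    TricklingStream(InputStream in) { super(in); }
    @Override public int read(byte[] b, int off, int len) throws IOException {
      return super.read(b, off, Math.min(len, 4));
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] digest = new byte[16];   // pretend this is a 16-byte MD5 digest on disk

    DataInputStream in1 =
        new DataInputStream(new TricklingStream(new ByteArrayInputStream(digest)));
    byte[] buf = new byte[16];
    int read = in1.read(buf);                       // returns 4: a partial read
    System.out.println("read() filled " + read + " of 16 bytes");

    DataInputStream in2 =
        new DataInputStream(new TricklingStream(new ByteArrayInputStream(digest)));
    in2.readFully(buf);                             // loops internally until all 16 arrive
    System.out.println("readFully() filled all 16 bytes");
  }
}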
@@ -636,7 +634,7 @@ public class DistributedCache {
         String frag1 = uriFiles[i].getFragment();
         if (frag1 == null)
           return false;
-        for (int j=i+1; j < uriFiles.length; i++){
+        for (int j=i+1; j < uriFiles.length; j++){
           String frag2 = uriFiles[j].getFragment();
           if (frag2 == null)
             return false;

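The second hunk fixes a loop that incremented the outer index i inside the inner loop, so j never advanced and the duplicate-fragment check could spin or walk past the intended range. A minimal sketch of the corrected pairwise check using plain java.net.URI; the method name fragmentsAreUnique and the exact string comparison are placeholders for this sketch, not the real DistributedCache API:

import java.net.URI;

public class FragmentCheck {
  /** True only if every URI carries a fragment and no two fragments collide. */
  static boolean fragmentsAreUnique(URI[] uris) {
    for (int i = 0; i < uris.length; i++) {
      String frag1 = uris[i].getFragment();
      if (frag1 == null)
        return false;
      for (int j = i + 1; j < uris.length; j++) {   // j++, not i++
        String frag2 = uris[j].getFragment();
        if (frag2 == null)
          return false;
        if (frag1.equals(frag2))                    // comparison rule assumed for the sketch
          return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    URI[] uris = { URI.create("hdfs://nn/a.txt#one"), URI.create("hdfs://nn/b.txt#two") };
    System.out.println(fragmentsAreUnique(uris));   // true
  }
}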
+ 10 - 2
src/java/org/apache/hadoop/fs/DF.java

@@ -144,12 +144,20 @@ public class DF {
   private void parseExecResult(BufferedReader lines) throws IOException {
     lines.readLine();                         // skip headings
   
+    String line = lines.readLine();
+    if (line == null) {
+      throw new IOException( "Expecting a line not the end of stream" );
+    }
     StringTokenizer tokens =
-      new StringTokenizer(lines.readLine(), " \t\n\r\f%");
+      new StringTokenizer(line, " \t\n\r\f%");
     
     this.filesystem = tokens.nextToken();
     if (!tokens.hasMoreTokens()) {            // for long filesystem name
-      line = lines.readLine();
+      line = lines.readLine();
+      if (line == null) {
+        throw new IOException( "Expecting a line not the end of stream" );
+      }
+      tokens = new StringTokenizer(line, " \t\n\r\f%");
     }
     this.capacity = Long.parseLong(tokens.nextToken()) * 1024;
     this.used = Long.parseLong(tokens.nextToken()) * 1024;

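BufferedReader.readLine() returns null at end of stream, so handing its result straight to StringTokenizer turns truncated df output into a NullPointerException; the patch surfaces it as an IOException with a readable message instead. A minimal sketch of the failure mode, with the df command and the remaining field parsing left out:

import java.io.BufferedReader;
import java.io.IOException;
import java.io.StringReader;
import java.util.StringTokenizer;

public class DfParseSketch {
  public static void main(String[] args) throws IOException {
    // Simulate df output that contains only the header line and no data line.
    BufferedReader lines = new BufferedReader(new StringReader("Filesystem ...\n"));
    lines.readLine();                 // skip headings
    String line = lines.readLine();   // end of stream: this is null
    if (line == null) {
      // The patched code throws IOException here; the old code passed null
      // to StringTokenizer and died with a NullPointerException instead.
      System.out.println("no data line from df");
      return;
    }
    StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
    System.out.println(tokens.nextToken());
  }
}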
+ 1 - 1
src/java/org/apache/hadoop/fs/InMemoryFileSystem.java

@@ -66,7 +66,7 @@ public class InMemoryFileSystem extends ChecksumFileSystem {
     public void initialize(URI uri, Configuration conf) {
       setConf(conf);
       int size = Integer.parseInt(conf.get("fs.inmemory.size.mb", "100"));
-      this.fsSize = size * 1024 * 1024;
+      this.fsSize = size * 1024L * 1024L;
       this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
       String path = this.uri.getPath();
       if (path.length() == 0) {

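fsSize is a long, but size * 1024 * 1024 is evaluated entirely in 32-bit int arithmetic before the assignment widens it, so any configured fs.inmemory.size.mb above 2047 silently wraps negative. Making one operand a long literal forces 64-bit multiplication throughout. A worked example of the wrap-around:

public class IntOverflowSketch {
  public static void main(String[] args) {
    int size = 3000;                       // e.g. fs.inmemory.size.mb = 3000
    long wrong = size * 1024 * 1024;       // int math first: 3,145,728,000 overflows int
    long right = size * 1024L * 1024L;     // long math throughout
    System.out.println(wrong);             // -1149239296
    System.out.println(right);             // 3145728000
  }
}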
+ 1 - 2
src/java/org/apache/hadoop/io/MapFile.java

@@ -387,7 +387,7 @@ public class MapFile {
       int high = count-1;
 
       while (low <= high) {
-        int mid = (low + high) >> 1;
+        int mid = (low + high) >>> 1;
         WritableComparable midVal = keys[mid];
         int cmp = comparator.compare(midVal, key);
 
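(low + high) >> 1 goes negative as soon as low + high exceeds Integer.MAX_VALUE, which breaks the binary search for very large counts; the unsigned shift >>> 1 still yields the correct midpoint because the wrapped sum, reinterpreted as an unsigned value, equals the true sum. The same fix is applied in MergeSort.java further down. A small demonstration:

public class MidpointSketch {
  public static void main(String[] args) {
    int low = 1500000000, high = 1600000000;   // low + high overflows int
    System.out.println((low + high) >> 1);     // -597483648: negative, invalid index
    System.out.println((low + high) >>> 1);    // 1550000000: the intended midpoint
  }
}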
@@ -537,7 +537,6 @@ public class MapFile {
     String out = args[1];
 
     Configuration conf = new Configuration();
-    int ioFileBufferSize = conf.getInt("io.file.buffer.size", 4096);
     FileSystem fs = FileSystem.getLocal(conf);
     MapFile.Reader reader = new MapFile.Reader(fs, in, conf);
     MapFile.Writer writer =

+ 1 - 1
src/java/org/apache/hadoop/io/SequenceFile.java

@@ -1827,7 +1827,7 @@ public class SequenceFile {
 
       int segments = sortPass(deleteInput);
       if (segments > 1) {
-        segments = mergePass(outFile.getParent());
+        mergePass(outFile.getParent());
       }
     }
 

+ 15 - 8
src/java/org/apache/hadoop/util/CopyFiles.java

@@ -571,17 +571,24 @@ public class CopyFiles extends ToolBase {
         reporter.setStatus("Copied: " + srcURI.toString() + 
                            " to: " + destinationPath.toString());
         
-      } catch(Exception e) {
-        reporter.setStatus("Failed to copy from: " + (Text)key);
-        if (ignoreReadFailures) {
-          return;
-        } else {
-          throw new IOException("Failed to copy from: " + (Text)key);
-        }
+      } catch (URISyntaxException e) {
+        handleException(reporter, (Text)key, e);
+      } catch (IOException ioe) {
+        handleException(reporter,(Text)key, ioe);
+      }
+    }
+
+    /* handle exceptions */
+    private void handleException( Reporter reporter, Text key, Throwable e )
+    throws IOException {
+      String errMsg = "Failed to copy from: " + (Text)key;
+      reporter.setStatus(errMsg);
+      if ( !ignoreReadFailures ) {
+        throw new IOException(errMsg);
       }
     }
   }
-  
+    
   /**
    * Factory to create requisite Mapper objects for distcp.
    * @author Arun C Murthy

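Catching bare Exception is flagged by FindBugs because it also swallows unchecked exceptions the mapper never meant to handle; the rewrite catches only URISyntaxException and IOException and routes both through one helper, so the status message and the ignoreReadFailures decision live in a single place. A minimal sketch of the same pattern outside Hadoop; the class and method names here are placeholders, not the distcp code:

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class NarrowCatchSketch {
  private static final boolean IGNORE_READ_FAILURES = false;   // stand-in for the job setting

  static void copyOne(String src) throws IOException {
    try {
      URI uri = new URI(src);          // may throw URISyntaxException
      openAndCopy(uri);                // may throw IOException
    } catch (URISyntaxException e) {   // only the exceptions we expect, not Exception
      handleException(src, e);
    } catch (IOException e) {
      handleException(src, e);
    }
  }

  /** One place to report the failure and decide whether to propagate it. */
  static void handleException(String src, Throwable e) throws IOException {
    String errMsg = "Failed to copy from: " + src;
    System.err.println(errMsg + " (" + e + ")");
    if (!IGNORE_READ_FAILURES) {
      throw new IOException(errMsg);
    }
  }

  static void openAndCopy(URI uri) throws IOException {
    System.out.println("copying " + uri);   // real copying elided for the sketch
  }

  public static void main(String[] args) throws IOException {
    copyOne("hdfs://namenode/path/file.txt");
  }
}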
+ 1 - 1
src/java/org/apache/hadoop/util/MergeSort.java

@@ -51,7 +51,7 @@ public class MergeSort {
     }
 
     // Recursively sort halves of dest into src
-    int mid = (low + high) >> 1;
+    int mid = (low + high) >>> 1;
     mergeSort(dest, src, low, mid);
     mergeSort(dest, src, mid, high);