HADOOP-757. Fix 'Bad File Descriptor' exception in HDFS client when an output file is closed twice. Contributed by Raghu.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@496844 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 18 years ago
parent
commit
362a5a7288
2 changed files with 37 additions and 6 deletions
  1. +3 −0
      CHANGES.txt
  2. +34 −6
      src/java/org/apache/hadoop/dfs/DFSClient.java

+ 3 - 0
CHANGES.txt

@@ -14,6 +14,9 @@ Trunk (unreleased changes)
  3. HADOOP-852.  Add an ant task to compile record definitions, and
     use it to compile record unit tests.  (Milind Bhandarkar via cutting)
 
+ 4. HADOOP-757.  Fix "Bad File Descriptor" exception in HDFS client
+    when an output file is closed twice.  (Raghu Angadi via cutting)
+
 
 Release 0.10.1 - 2007-01-10
 

+ 34 - 6
src/java/org/apache/hadoop/dfs/DFSClient.java

@@ -936,6 +936,25 @@ class DFSClient implements FSConstants {
             }
         }
 
+        /* Wrapper for closing backupStream. This sets backupStream to null
+         * first so that subsequent writes cannot touch a stream that is no
+         * longer valid. Otherwise we might end up writing to a file
+         * descriptor that we no longer own.
+         */
+        private void closeBackupStream() throws IOException {
+          OutputStream stream = backupStream;
+          backupStream = null;
+          stream.close();
+        }
+        /* Similar to closeBackupStream(). Theoretically, deleting a file
+         * twice could result in deleting a file that we should not.
+         */
+        private void deleteBackupFile() {
+          File file = backupFile;
+          backupFile = null;
+          file.delete();
+        }
+        
         private File newBackupFile() throws IOException {
           File result = conf.getFile("dfs.client.buffer.dir",
                                      "tmp"+File.separator+
@@ -1147,6 +1166,10 @@ class DFSClient implements FSConstants {
             int workingPos = Math.min(pos, maxPos);
             
             if (workingPos > 0) {
+                if ( backupStream == null ) {
+                    throw new IOException( "Trying to write to backupStream " +
+                                           "but it is already closed or was never opened");
+                }
                 //
                 // To the local block backup, write just the bytes
                 //
@@ -1168,7 +1191,7 @@ class DFSClient implements FSConstants {
             //
             // Done with local copy
             //
-            backupStream.close();
+            closeBackupStream();
 
             //
             // Send it to datanode
@@ -1204,10 +1227,11 @@ class DFSClient implements FSConstants {
             //
             // Delete local backup, start new one
             //
-            backupFile.delete();
-            backupFile = newBackupFile();
-            backupStream = new FileOutputStream(backupFile);
+            deleteBackupFile();
+            File tmpFile = newBackupFile();
             bytesWrittenToBlock = 0;
+            backupStream = new FileOutputStream(tmpFile);
+            backupFile = tmpFile;
         }
 
         /**
@@ -1273,8 +1297,12 @@ class DFSClient implements FSConstants {
               }
             }
             
-            backupStream.close();
-            backupFile.delete();
+            if ( backupStream != null ) {
+              closeBackupStream();
+            }
+            if ( backupFile != null ) {
+              deleteBackupFile();
+            }
 
             if (s != null) {
                 s.close();
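
Both helpers apply the same idiom: clear the field before releasing the
resource, so that a second close() or delete() (e.g. when an output file is
closed twice, as in HADOOP-757) becomes a no-op instead of acting on a file
descriptor or path that the operating system may already have handed to
someone else. Below is a minimal standalone sketch of that idiom; the class
and method names (BackupBuffer, write, close) are illustrative, not part of
the DFSClient API.

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

/* Illustrative sketch: null-before-release makes close() idempotent. */
class BackupBuffer {
    private OutputStream backupStream;
    private File backupFile;

    BackupBuffer() throws IOException {
        backupFile = File.createTempFile("backup", ".tmp");
        backupStream = new FileOutputStream(backupFile);
    }

    void write(byte[] b) throws IOException {
        if (backupStream == null) {
            // Refuse the write: the descriptor may have been reused by now.
            throw new IOException("backup stream is already closed");
        }
        backupStream.write(b);
    }

    /* Safe to call more than once: a second call finds null fields. */
    void close() throws IOException {
        if (backupStream != null) {
            OutputStream stream = backupStream;
            backupStream = null;   // clear first, so a retry cannot close twice
            stream.close();
        }
        if (backupFile != null) {
            File file = backupFile;
            backupFile = null;     // likewise, never delete the same path twice
            file.delete();
        }
    }
}

With this arrangement, a caller that closes the buffer twice simply takes the
null branch on the second call, which is exactly the double-close path this
patch hardens in DFSClient.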