View source

HDFS-14048. DFSOutputStream close() throws exception on subsequent call after DataNode restart. Contributed by Erik Krogen.

Inigo Goiri committed 6 years ago
d9b3b58389

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java

@@ -290,7 +290,7 @@ class DataStreamer extends Daemon {
     }
     packets.clear();
   }
-  
+
   static class LastExceptionInStreamer {
     private IOException thrown;
 
@@ -1754,6 +1754,7 @@ class DataStreamer extends Daemon {
         blockStream = out;
         result =  true; // success
         errorState.reset();
+        lastException.clear();
         // remove all restarting nodes from failed nodes list
         failed.removeAll(restartingNodes);
         restartingNodes.clear();
@@ -1835,7 +1836,7 @@ class DataStreamer extends Daemon {
 
   protected LocatedBlock locateFollowingBlock(DatanodeInfo[] excluded,
       ExtendedBlock oldBlock) throws IOException {
-    final DfsClientConf conf = dfsClient.getConf(); 
+    final DfsClientConf conf = dfsClient.getConf();
     int retries = conf.getNumBlockWriteLocateFollowingRetry();
     long sleeptime = conf.getBlockWriteLocateFollowingInitialDelayMs();
     while (true) {

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java

@@ -436,6 +436,8 @@ public class TestClientProtocolForPipelineRecovery {
           0, out.getStreamer().getPipelineRecoveryCount());
       out.write(1);
       out.close();
+      // Ensure that subsequent closes are idempotent and do not throw errors
+      out.close();
     } finally {
       if (cluster != null) {
         cluster.shutdown();