Browse Source

HDFS-11915. Sync rbw dir on the first hsync() to avoid file loss on power failure. Contributed by Vinayakumar B.

(cherry picked from commit 2273499aef18ac2c7ffc435a61db8cea591e8b1f)
(cherry picked from commit f24d3b69b403f3a2c5af6b9c74a643fb9f4492e5)
Wei-Chiu Chuang 7 years ago
parent
commit
b42f02ca0c

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

@@ -24,6 +24,7 @@ import java.io.Closeable;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.EOFException;
+import java.io.File;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.Writer;
@@ -127,6 +128,7 @@ class BlockReceiver implements Closeable {
 
   private boolean syncOnClose;
   private volatile boolean dirSyncOnFinalize;
+  private boolean dirSyncOnHSyncDone = false;
   private long restartBudget;
   /** the reference of the volume where the block receiver writes to */
   private ReplicaHandler replicaHandler;
@@ -421,6 +423,13 @@ class BlockReceiver implements Closeable {
       }
       flushTotalNanos += flushEndNanos - flushStartNanos;
     }
+    if (isSync && !dirSyncOnHSyncDone && replicaInfo instanceof ReplicaInfo) {
+      ReplicaInfo rInfo = (ReplicaInfo) replicaInfo;
+      File baseDir = rInfo.getBlockFile().getParentFile();
+      FileIoProvider fileIoProvider = datanode.getFileIoProvider();
+      DatanodeUtil.fsyncDirectory(fileIoProvider, rInfo.getVolume(), baseDir);
+      dirSyncOnHSyncDone = true;
+    }
     if (checksumOut != null || streams.getDataOut() != null) {
       datanode.metrics.addFlushNanos(flushTotalNanos);
       if (isSync) {

+ 18 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java

@@ -142,4 +142,22 @@ public class DatanodeUtil {
     }
     return (FileInputStream)lin.getWrappedStream();
   }
+
+  /**
+   * Call fsync on the specified directories to sync metadata changes.
+   * Syncing a file's own contents does not make its directory entry
+   * durable; the containing directory must be fsync'ed as well so that
+   * newly created block files survive a power failure (HDFS-11915).
+   * @param fileIoProvider provider that performs the actual dirSync I/O
+   * @param volume the volume the directories belong to
+   * @param dirs one or more directories to sync; processed in order
+   * @throws IOException if syncing any directory fails; the message names
+   *         the failing directory and the original exception is kept as
+   *         the cause
+   */
+  public static void fsyncDirectory(FileIoProvider fileIoProvider,
+      FsVolumeSpi volume, File... dirs) throws IOException {
+    for (File dir : dirs) {
+      try {
+        fileIoProvider.dirSync(volume, dir);
+      } catch (IOException e) {
+        // Re-wrap to record which directory failed, preserving the cause.
+        throw new IOException("Failed to sync " + dir, e);
+      }
+    }
+  }
 }

+ 2 - 13
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -929,18 +929,6 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
     return dstfile;
   }
 
-  private void fsyncDirectory(FsVolumeSpi volume, File... dirs)
-      throws IOException {
-    FileIoProvider fileIoProvider = datanode.getFileIoProvider();
-    for (File dir : dirs) {
-      try {
-        fileIoProvider.dirSync(volume, dir);
-      } catch (IOException e) {
-        throw new IOException("Failed to sync " + dir, e);
-      }
-    }
-  }
-
   /**
    * Copy the block and meta files for the given block to the given destination.
    * @return the new meta and block files.
@@ -1801,7 +1789,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       FsVolumeSpi v = replicaInfo.getVolume();
       File f = replicaInfo.getBlockFile();
       File dest = finalizedReplicaInfo.getBlockFile();
-      fsyncDirectory(v, dest.getParentFile(), f.getParentFile());
+      DatanodeUtil.fsyncDirectory(datanode.getFileIoProvider(), v,
+          dest.getParentFile(), f.getParentFile());
     }
   }