Browse source

HDFS-16910. Fix incorrectly initializing RandomAccessFile caused flush performance decreased for JN (#5359)

huhaiyang 2 years ago
parent
commit
d5c046518e

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java

@@ -84,9 +84,9 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
     doubleBuf = new EditsDoubleBuffer(size);
     RandomAccessFile rp;
     if (shouldSyncWritesAndSkipFsync) {
-      rp = new RandomAccessFile(name, "rw");
+      rp = new RandomAccessFile(name, "rwd");
     } else {
-      rp = new RandomAccessFile(name, "rws");
+      rp = new RandomAccessFile(name, "rw");
     }
     try {
       fp = new FileOutputStream(rp.getFD()); // open for append

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -2721,10 +2721,10 @@
   <description>
     Specifies whether to flush edit log file channel. When set, expensive
     FileChannel#force calls are skipped and synchronous disk writes are
-    enabled instead by opening the edit log file with RandomAccessFile("rws")
+    enabled instead by opening the edit log file with RandomAccessFile("rwd")
     flags. This can significantly improve the performance of edit log writes
     on the Windows platform.
-    Note that the behavior of the "rws" flags is platform and hardware specific
+    Note that the behavior of the "rwd" flags is platform and hardware specific
     and might not provide the same level of guarantees as FileChannel#force.
     For example, the write will skip the disk-cache on SAS and SCSI devices
     while it might not on SATA devices. This is an expert level setting,