
HDFS-14476. lock too long when fix inconsistent blocks between disk and in-memory. Contributed by Sean Chow.

(cherry picked from commit 8b802d6b444b95791ba156c7bb307dd584cc9dba)
Wei-Chiu Chuang, 5 years ago
parent
commit
e978c6c9ed

+ 14 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java

@@ -67,6 +67,7 @@ public class DirectoryScanner implements Runnable {
       + " starting at %s with interval of %dms";
   private static final String START_MESSAGE_WITH_THROTTLE = START_MESSAGE
       + " and throttle limit of %dms/s";
+  private static final int RECONCILE_BLOCKS_BATCH_SIZE = 1000;
 
   private final FsDatasetSpi<?> dataset;
   private final ExecutorService reportCompileThreadPool;
@@ -372,7 +373,11 @@ public class DirectoryScanner implements Runnable {
    */
   @VisibleForTesting
   public void reconcile() throws IOException {
+    LOG.debug("reconcile start DirectoryScanning");
     scan();
+    // HDFS-14476: run checkAndUpdate in batches to avoid holding the lock too
+    // long
+    int loopCount = 0;
     for (Entry<String, LinkedList<ScanInfo>> entry : diffs.entrySet()) {
       String bpid = entry.getKey();
       LinkedList<ScanInfo> diff = entry.getValue();
@@ -380,6 +385,15 @@ public class DirectoryScanner implements Runnable {
       for (ScanInfo info : diff) {
         dataset.checkAndUpdate(bpid, info);
+
+        if (loopCount % RECONCILE_BLOCKS_BATCH_SIZE == 0) {
+          try {
+            Thread.sleep(2000);
+          } catch (InterruptedException e) {
+            // do nothing
+          }
+        }
+        loopCount++;
      }
     }
     if (!retainDiffs) clear();
   }
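
For readers skimming the diff: the change processes the disk/in-memory differences found by the directory scan in fixed-size batches (RECONCILE_BLOCKS_BATCH_SIZE = 1000) and sleeps 2 seconds between batches, so dataset.checkAndUpdate() does not hold the DataNode's dataset lock continuously when a large backlog of inconsistent blocks has to be fixed. Below is a minimal standalone sketch of the same batch-with-pause pattern; BatchedReconciler, checkAndUpdate and the parameter names are illustrative assumptions, not the actual Hadoop DataNode API.

import java.util.List;
import java.util.concurrent.TimeUnit;

/**
 * Minimal sketch of the batch-with-pause pattern from the patch above.
 * BatchedReconciler and checkAndUpdate are illustrative names only,
 * not the actual Hadoop DataNode API.
 */
public class BatchedReconciler {

  private static final int BATCH_SIZE = 1000;  // blocks fixed per batch
  private static final long PAUSE_MS = 2000L;  // pause between batches

  /** Stand-in for dataset.checkAndUpdate(bpid, info) in DirectoryScanner. */
  private void checkAndUpdate(String blockPoolId, String blockId) {
    // fix one disk/in-memory inconsistency while holding the dataset lock
  }

  /**
   * Fixes all reported differences, sleeping after every BATCH_SIZE blocks
   * so other users of the lock (heartbeats, block reports, client reads)
   * are not starved while a large backlog is reconciled.
   */
  public void reconcile(String blockPoolId, List<String> diffBlockIds) {
    int processed = 0;
    for (String blockId : diffBlockIds) {
      checkAndUpdate(blockPoolId, blockId);
      processed++;
      if (processed % BATCH_SIZE == 0) {
        try {
          TimeUnit.MILLISECONDS.sleep(PAUSE_MS);
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();  // preserve the interrupt flag
          return;                              // stop early on shutdown
        }
      }
    }
  }
}

One small difference from the patch: the sketch increments the counter before the modulo check, so the first pause comes only after a full batch rather than after the first block, and it restores the interrupt flag instead of swallowing InterruptedException.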