
HDFS-8046. Allow better control of getContentSummary. Contributed by Kihwal Lee.
(cherry picked from commit 285b31e75e51ec8e3a796c2cb0208739368ca9b8)

(cherry picked from commit 7e622076d41a85fc9a8600fb270564a085f5cd83)

Conflicts:
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

(cherry picked from commit 1ef5e0b18066ca949adcf4c55a41f186c47e7264)

Kihwal Lee, 10 years ago
Parent:
Current commit: 619f793846

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -22,6 +22,8 @@ Release 2.6.1 - UNRELEASED
     HDFS-7596. NameNode should prune dead storages from storageMap.
     (Arpit Agarwal via cnauroth)
 
+    HDFS-8046. Allow better control of getContentSummary (kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -272,7 +272,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int     DFS_LIST_LIMIT_DEFAULT = 1000;
   public static final String  DFS_CONTENT_SUMMARY_LIMIT_KEY = "dfs.content-summary.limit";
-  public static final int     DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 0;
+  public static final int     DFS_CONTENT_SUMMARY_LIMIT_DEFAULT = 5000;
+  public static final String  DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY = "dfs.content-summary.sleep-microsec";
+  public static final long    DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT = 500;
   public static final String  DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY = "dfs.datanode.failed.volumes.tolerated";
   public static final int     DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT = 0;
   public static final String  DFS_DATANODE_SYNCONCLOSE_KEY = "dfs.datanode.synconclose";

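For context, a minimal sketch of how these two knobs might be set programmatically. The key names and defaults come from the diff above; the values chosen and the use of HdfsConfiguration are illustrative assumptions, not part of this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Illustrative only: raise the per-run count limit and shorten the sleep.
// Defaults after this commit are 5000 counts and 500 microseconds.
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY, 10000);
conf.setLong(DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY, 200L);

Setting the limit key to 0 preserves the old behavior of computing the whole summary in one run with no yielding.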
+ 7 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java

@@ -29,6 +29,8 @@ public class ContentSummaryComputationContext {
   private long nextCountLimit = 0;
   private long limitPerRun = 0;
   private long yieldCount = 0;
+  private long sleepMilliSec = 0;
+  private int sleepNanoSec = 0;
 
   /**
    * Constructor
@@ -40,17 +42,19 @@ public class ContentSummaryComputationContext {
    *        no limit (i.e. no yielding)
    */
   public ContentSummaryComputationContext(FSDirectory dir,
-      FSNamesystem fsn, long limitPerRun) {
+      FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
     this.dir = dir;
     this.fsn = fsn;
     this.limitPerRun = limitPerRun;
     this.nextCountLimit = limitPerRun;
     this.counts = Content.Counts.newInstance();
+    this.sleepMilliSec = sleepMicroSec/1000;
+    this.sleepNanoSec = (int)((sleepMicroSec%1000)*1000);
   }
 
   /** Constructor for blocking computation. */
   public ContentSummaryComputationContext() {
-    this(null, null, 0);
+    this(null, null, 0, 1000);
   }
 
   /** Return current yield count */
@@ -101,7 +105,7 @@ public class ContentSummaryComputationContext {
     fsn.readUnlock();
 
     try {
-      Thread.sleep(1);
+      Thread.sleep(sleepMilliSec, sleepNanoSec);
     } catch (InterruptedException ie) {
     } finally {
       // reacquire

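As a quick sanity check of the arithmetic above (an assumed worked example, not from the commit): with the new default of 500 microseconds, sleepMilliSec = 500/1000 = 0 and sleepNanoSec = (500%1000)*1000 = 500000, so the yield becomes Thread.sleep(0, 500000), roughly half a millisecond instead of the old fixed Thread.sleep(1).

// A minimal standalone sketch verifying the micro -> (ms, ns) split.
public class SleepSplitDemo {
  public static void main(String[] args) throws InterruptedException {
    long sleepMicroSec = 500;                                 // new default
    long sleepMilliSec = sleepMicroSec / 1000;                // 0 ms
    int sleepNanoSec = (int) ((sleepMicroSec % 1000) * 1000); // 500,000 ns
    Thread.sleep(sleepMilliSec, sleepNanoSec);                // ~0.5 ms pause
    System.out.println(sleepMilliSec + " ms + " + sleepNanoSec + " ns");
  }
}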
+ 9 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -144,6 +144,7 @@ public class FSDirectory implements Closeable {
   private final int maxDirItems;
   private final int lsLimit;  // max list limit
   private final int contentCountLimit; // max content summary counts per run
+  private final long contentSleepMicroSec;
   private final INodeMap inodeMap; // Synchronized by dirLock
   private long yieldCount = 0; // keep track of lock yield count.
   private final int inodeXAttrsLimit; //inode xattrs max limit
@@ -204,6 +205,9 @@ public class FSDirectory implements Closeable {
     this.contentCountLimit = conf.getInt(
         DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_KEY,
         DFSConfigKeys.DFS_CONTENT_SUMMARY_LIMIT_DEFAULT);
+    this.contentSleepMicroSec = conf.getLong(
+        DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_KEY,
+        DFSConfigKeys.DFS_CONTENT_SUMMARY_SLEEP_MICROSEC_DEFAULT);
     
     // filesystem limits
     this.maxComponentLength = conf.getInt(
@@ -252,6 +256,10 @@ public class FSDirectory implements Closeable {
     return rootDir;
   }
 
+  long getContentSleepMicroSec() {
+    return contentSleepMicroSec;
+  }
+
   /**
    * Shutdown the filestore
    */
@@ -2166,7 +2174,7 @@ public class FSDirectory implements Closeable {
        ContentSummaryComputationContext cscc =
            new ContentSummaryComputationContext(this, getFSNamesystem(),
-            contentCountLimit);
+            contentCountLimit, contentSleepMicroSec);
         ContentSummary cs = targetNode.computeAndConvertContentSummary(cscc);
         yieldCount += cscc.getYieldCount();
         return cs;
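Putting the pieces together, the throttling pattern this commit generalizes looks roughly like the following. This is a paraphrased sketch, not the actual NameNode code: hasMoreInodes() and countOneSubtree() are hypothetical stand-ins, and the lock and counter handling is simplified.

// Paraphrased sketch: count work under the read lock, and every
// limitPerRun items release the lock and nap for the configured interval.
long processed = 0;
long nextCountLimit = limitPerRun;
while (hasMoreInodes()) {                  // hypothetical traversal
  processed += countOneSubtree();          // hypothetical unit of work
  if (limitPerRun > 0 && processed > nextCountLimit) {
    nextCountLimit += limitPerRun;
    fsn.readUnlock();                      // let writers in
    try {
      Thread.sleep(sleepMilliSec, sleepNanoSec);
    } catch (InterruptedException ignored) {
    } finally {
      fsn.readLock();                      // reacquire before continuing
    }
  }
}

The net effect is that a large getContentSummary no longer holds the namesystem read lock continuously; both how often it yields and how long it pauses are now tunable rather than hard-coded.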