
commit f8cfbf27caaad5a6987ee05bf84355bfa808fa0b
Author: Tsz Wo Sze <tsz@ucdev29.inktomisearch.com>
Date: Thu Nov 25 00:34:37 2010 +0000

. Make BLOCK_INVALIDATE_LIMIT configurable.

+++ b/YAHOO-CHANGES.txt
+ . Make BLOCK_INVALIDATE_LIMIT configurable. (szetszwo)
+


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security-patches@1077748 13f79535-47bb-0310-9956-ffa450edef68

Committed by Owen O'Malley
parent commit adf81cd6d9

+ 2 - 0
src/hdfs/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -189,6 +189,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long    DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT = 21600000;
   public static final String  DFS_BLOCKREPORT_INITIAL_DELAY_KEY = "dfs.blockreport.initialDelay";
   public static final int     DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0;
+  public static final String  DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit";
+  public static final int     DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT = 100;
 
   //Keys with no defaults
   public static final String  DFS_DATANODE_PLUGINS_KEY = "dfs.datanode.plugins";

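The new key is read like any other HDFS configuration value, so it can be set in hdfs-site.xml or programmatically. As a minimal sketch of how a caller might override it (the value 500 is purely illustrative and not part of this patch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    // Hypothetical usage, not from this commit: allow up to 500 blocks
    // per invalidate command instead of the built-in default of 100.
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, 500);
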
+ 0 - 3
src/hdfs/org/apache/hadoop/hdfs/protocol/FSConstants.java

@@ -26,9 +26,6 @@ import org.apache.hadoop.conf.Configuration;
 public interface FSConstants {
   public static int MIN_BLOCKS_FOR_WRITE = 5;
 
-  // Chunk the block Invalidate message
-  public static final int BLOCK_INVALIDATE_CHUNK = 100;
-
   // Long that indicates "leave current quota unchanged"
   public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
   public static final long QUOTA_RESET = -1L;

+ 8 - 1
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -337,7 +337,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   private final GenerationStamp generationStamp = new GenerationStamp();
 
   // Ask Datanode only up to this many blocks to delete.
-  private int blockInvalidateLimit = FSConstants.BLOCK_INVALIDATE_CHUNK;
+  private int blockInvalidateLimit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
 
   // precision of access times.
   private long accessTimePrecision = 0;
@@ -504,8 +504,15 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
       conf.getInt("dfs.replication.interval", 3) * 1000L;
     this.defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
     this.maxFsObjects = conf.getLong("dfs.max.objects", 0);
+
+    //default limit
     this.blockInvalidateLimit = Math.max(this.blockInvalidateLimit, 
                                          20*(int)(heartbeatInterval/1000));
+    //use conf value if it is set.
+    this.blockInvalidateLimit = conf.getInt(
+        DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, this.blockInvalidateLimit);
+    LOG.info(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY + "=" + this.blockInvalidateLimit);
+
     this.accessTimePrecision = conf.getLong("dfs.access.time.precision", 0);
     this.supportAppends = conf.getBoolean("dfs.support.append", false);
     this.isAccessTokenEnabled = conf.getBoolean(

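The resulting precedence is: start from the compile-time default of 100, raise it to 20 blocks per second of heartbeat interval if that is larger, then let an explicit dfs.block.invalidate.limit setting override both. Restated in isolation as a hedged sketch (the helper method name is illustrative, not part of the patch):

    // Illustrative restatement of the limit computation done in FSNamesystem's constructor.
    static int computeBlockInvalidateLimit(Configuration conf, long heartbeatIntervalMs) {
      int limit = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;            // 100
      limit = Math.max(limit, 20 * (int) (heartbeatIntervalMs / 1000));        // scale with heartbeat interval
      return conf.getInt(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, limit); // explicit setting wins
    }

With the default 3-second heartbeat this gives max(100, 60) = 100, so behaviour is unchanged unless the key is set explicitly.
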
+ 4 - 3
src/test/org/apache/hadoop/hdfs/server/namenode/TestHeartbeatHandling.java

@@ -2,7 +2,10 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.ArrayList;
 
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
@@ -11,8 +14,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 
-import junit.framework.TestCase;
-
 /**
  * Test if FSNamesystem handles heartbeat right
  */
@@ -33,7 +34,7 @@ public class TestHeartbeatHandling extends TestCase {
       
       final int REMAINING_BLOCKS = 1;
       final int MAX_REPLICATE_LIMIT = conf.getInt("dfs.max-repl-streams", 2);
-      final int MAX_INVALIDATE_LIMIT = FSNamesystem.BLOCK_INVALIDATE_CHUNK;
+      final int MAX_INVALIDATE_LIMIT = DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT;
       final int MAX_INVALIDATE_BLOCKS = 2*MAX_INVALIDATE_LIMIT+REMAINING_BLOCKS;
       final int MAX_REPLICATE_BLOCKS = 2*MAX_REPLICATE_LIMIT+REMAINING_BLOCKS;
       final DatanodeDescriptor[] ONE_TARGET = new DatanodeDescriptor[1];
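
Because the limit is now configuration-driven, a test that wants a different chunk size could set the key on the MiniDFSCluster configuration before starting it. A hedged sketch, not part of this patch (the value 16 is arbitrary):

    // Hypothetical test setup: shrink the invalidate chunk so fewer blocks are needed to exercise chunking.
    Configuration conf = new Configuration();
    conf.setInt(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_KEY, 16);
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
    try {
      // ... issue heartbeats and verify at most 16 blocks are returned per invalidate command ...
    } finally {
      cluster.shutdown();
    }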