
HADOOP-3859 Merge -r 683362:683365 from branch-0.18 to branch-0.17.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/branch-0.17@683366 13f79535-47bb-0310-9956-ffa450edef68
Owen O'Malley committed 16 years ago
parent commit 728dcad8c4
2 changed files with 14 additions and 9 deletions
  1. CHANGES.txt (+3 -0)
  2. src/java/org/apache/hadoop/dfs/DataNode.java (+11 -9)

+ 3 - 0
CHANGES.txt

@@ -35,6 +35,9 @@ Release 0.17.2 - Unreleased
     FileSystem.delete rather than the FileUtil.fullyDelete. (Amareshwari
     Sri Ramadasu via acmurthy)  
 
+    HADOOP-3859. Allow the maximum number of xceivers in the data node to
+    be configurable. (Johan Oskarsson via omalley)
+
 Release 0.17.1 - 2008-06-23
 
   INCOMPATIBLE CHANGES
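
The changelog entry above refers to the new dfs.datanode.max.xcievers key (the misspelling is part of the property name) read in DataNode.java below; when unset it falls back to the compiled-in default of 256. As a minimal, illustrative sketch of how an operator might raise the limit, assuming the usual hadoop-site.xml override file of the 0.17 line; the value 1024 is only an example choice, not a recommendation:

    <!-- hadoop-site.xml (illustrative): raise the data-node xceiver limit -->
    <property>
      <name>dfs.datanode.max.xcievers</name>
      <!-- default compiled into DataNode.java is 256 -->
      <value>1024</value>
    </property>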

+ 11 - 9
src/java/org/apache/hadoop/dfs/DataNode.java

@@ -127,6 +127,14 @@ public class DataNode implements FSConstants, Runnable {
   
   private static final Random R = new Random();
 
+  /**
+   * Maximal number of concurrent xceivers per node.
+   * Enforcing the limit is required in order to avoid data-node
+   * running out of memory.
+   */
+  private static final int MAX_XCEIVER_COUNT = 256;
+  private int maxXceiverCount = MAX_XCEIVER_COUNT;
+  
   /**
    * We need an estimate for block size to check if the disk partition has
    * enough space. For now we set it to be the default block size set
@@ -267,6 +275,7 @@ public class DataNode implements FSConstants, Runnable {
     this.dnRegistration.setName(machineName + ":" + tmpPort);
     LOG.info("Opened server at " + tmpPort);
       
+    this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers", MAX_XCEIVER_COUNT);
     this.threadGroup = new ThreadGroup("dataXceiveServer");
     this.dataXceiveServer = new Daemon(threadGroup, new DataXceiveServer(ss));
     this.threadGroup.setDaemon(true); // auto destroy when empty
@@ -569,13 +578,6 @@ public class DataNode implements FSConstants, Runnable {
     shutdown();
   }
     
-  /**
-   * Maximal number of concurrent xceivers per node.
-   * Enforcing the limit is required in order to avoid data-node
-   * running out of memory.
-   */
-  private final static int MAX_XCEIVER_COUNT = 256;
-
   /** Number of concurrent xceivers per node. */
   int getXceiverCount() {
     return threadGroup == null ? 0 : threadGroup.activeCount();
@@ -973,10 +975,10 @@ public class DataNode implements FSConstants, Runnable {
         byte op = in.readByte();
         // Make sure the xciver count is not exceeded
         int curXceiverCount = getXceiverCount();
-        if(curXceiverCount > MAX_XCEIVER_COUNT) {
+        if (curXceiverCount > maxXceiverCount) {
           throw new IOException("xceiverCount " + curXceiverCount
                                 + " exceeds the limit of concurrent xcievers "
-                                + MAX_XCEIVER_COUNT);
+                                + maxXceiverCount);
         }
         long startTime = now();
         switch ( op ) {
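
For illustration, here is a condensed, self-contained sketch of the pattern the patch introduces: read the limit from the configuration with a compiled-in default, then reject work once the live xceiver count passes it. The class and method names (XceiverLimit, checkLimit) are hypothetical and not part of the patch; only the property key, default, and exception message mirror the diff above.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;

    // Hypothetical helper illustrating the configurable-limit pattern from the patch.
    class XceiverLimit {
      static final int DEFAULT_MAX_XCEIVER_COUNT = 256;  // compiled-in default, as in the patch
      private final int maxXceiverCount;

      XceiverLimit(Configuration conf) {
        // Same key as the patch uses (note the historical misspelling "xcievers").
        this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers",
                                           DEFAULT_MAX_XCEIVER_COUNT);
      }

      // Throw if the current number of active xceiver threads exceeds the limit.
      void checkLimit(int curXceiverCount) throws IOException {
        if (curXceiverCount > maxXceiverCount) {
          throw new IOException("xceiverCount " + curXceiverCount
                                + " exceeds the limit of concurrent xceivers "
                                + maxXceiverCount);
        }
      }
    }

A DataNode-like caller would construct such a helper once at startup from the daemon's Configuration and invoke checkLimit with getXceiverCount() before dispatching each incoming request, which is the shape of the change in the final hunk above.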