
HDFS-5348. Fix error message when dfs.datanode.max.locked.memory is improperly configured. (Contributed by Colin Patrick McCabe)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1531460 13f79535-47bb-0310-9956-ffa450edef68
Andrew Wang, 11 years ago
commit 09e9e57a0b

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-4949.txt

@@ -83,3 +83,6 @@ HDFS-4949 (Unreleased)
 
 
    HDFS-5314. Do not expose CachePool type in AddCachePoolOp (Colin Patrick
    McCabe)
+
+    HDFS-5348. Fix error message when dfs.datanode.max.locked.memory is
+    improperly configured. (Colin Patrick McCabe)

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -753,7 +753,7 @@ public class DataNode extends Configured
      if (dnConf.maxLockedMemory > ulimit) {
      throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
-          " size (%s) of %d bytes is less than the datanode's available" +
+          " size (%s) of %d bytes is more than the datanode's available" +
          " RLIMIT_MEMLOCK ulimit of %d bytes.",
          DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
          dnConf.maxLockedMemory,
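
The new wording matches the actual condition: startup is refused when the configured value exceeds the RLIMIT_MEMLOCK ulimit, so the message should read "more than", not "less than". For reference, a minimal standalone sketch of the corrected check (hypothetical class name and illustrative values; only the condition and format string mirror the diff above):

// MemlockCheckSketch.java -- simplified from the DataNode diff, not Hadoop source
public class MemlockCheckSketch {
  public static void main(String[] args) {
    String key = "dfs.datanode.max.locked.memory";
    long maxLockedMemory = 128L * 1024 * 1024; // configured limit, in bytes
    long ulimit = 64L * 1024 * 1024;           // RLIMIT_MEMLOCK, in bytes
    if (maxLockedMemory > ulimit) {            // config exceeds the ulimit
      throw new RuntimeException(String.format(
          "Cannot start datanode because the configured max locked memory" +
          " size (%s) of %d bytes is more than the datanode's available" +
          " RLIMIT_MEMLOCK ulimit of %d bytes.",
          key, maxLockedMemory, ulimit));
    }
  }
}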

+ 25 - 17
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java

@@ -114,25 +114,33 @@ public class TestDatanodeConfig {
  public void testMemlockLimit() throws Exception {
    assumeTrue(NativeIO.isAvailable());
    final long memlockLimit = NativeIO.getMemlockLimit();
+
+    // Can't increase the memlock limit past the maximum.
+    assumeTrue(memlockLimit != Long.MAX_VALUE);
+
    Configuration conf = cluster.getConfiguration(0);
-    // Try starting the DN with limit configured to the ulimit
-    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        memlockLimit);
-    if (memlockLimit == Long.MAX_VALUE) {
-      // Can't increase the memlock limit past the maximum.
-      return;
-    }
-    DataNode dn = null;
-    dn = DataNode.createDataNode(new String[]{},  conf);
-    dn.shutdown();
-    // Try starting the DN with a limit > ulimit
-    conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
-        memlockLimit+1);
+    long prevLimit = conf.
+        getLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+            DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
    try {
-      dn = DataNode.createDataNode(new String[]{}, conf);
-    } catch (RuntimeException e) {
-      GenericTestUtils.assertExceptionContains(
-          "less than the datanode's available RLIMIT_MEMLOCK", e);
+      // Try starting the DN with limit configured to the ulimit
+      conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+          memlockLimit);
+      DataNode dn = null;
+      dn = DataNode.createDataNode(new String[]{},  conf);
+      dn.shutdown();
+      // Try starting the DN with a limit > ulimit
+      conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+          memlockLimit+1);
+      try {
+        dn = DataNode.createDataNode(new String[]{}, conf);
+      } catch (RuntimeException e) {
+        GenericTestUtils.assertExceptionContains(
+            "more than the datanode's available RLIMIT_MEMLOCK", e);
+      }
+    } finally {
+      conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
+          prevLimit);
    }
  }
}
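
The reworked test changes two things besides the message string: the Long.MAX_VALUE case now skips the test up front via assumeTrue instead of silently returning, and the configuration override is wrapped in try/finally so the previous dfs.datanode.max.locked.memory value is restored for later tests on the shared cluster. A minimal sketch of that save/override/restore pattern (hypothetical key and values, not from the Hadoop sources):

// ConfRestoreSketch.java -- illustrative pattern only
import org.apache.hadoop.conf.Configuration;

public class ConfRestoreSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    long prevLimit = conf.getLong("some.key", 0L); // remember the prior value
    try {
      conf.setLong("some.key", 42L);               // temporary override for the test
      // ... exercise the code under test here ...
    } finally {
      conf.setLong("some.key", prevLimit);         // restore even if the test throws
    }
  }
}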