
Merge branch 'apache-ref/branch-2.4' into baikal-GA-2.4

cnauroth 11 years ago
commit c04d146218

+ 3 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -2466,6 +2466,9 @@ Release 0.23.11 - UNRELEASED
 
     HADOOP-10129. Distcp may succeed when it fails (daryn)
 
+    HADOOP-8826. Docs still refer to 0.20.205 as stable line (Mit Desai via
+    jeagles)
+
 Release 0.23.10 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -37,6 +37,12 @@ Release 2.4.1 - UNRELEASED
     HDFS-6231. DFSClient hangs infinitely if using hedged reads and all eligible
     datanodes die. (cnauroth)
 
+    HDFS-6234. TestDatanodeConfig#testMemlockLimit fails on Windows due to
+    invalid file path. (cnauroth)
+
+    HDFS-6235. TestFileJournalManager can fail on Windows due to file locking if
+    tests run out of order. (cnauroth)
+
 Release 2.4.0 - 2014-04-07 
 
   INCOMPATIBLE CHANGES

+ 12 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeConfig.java

@@ -86,9 +86,11 @@ public class TestDatanodeConfig {
       fail();
     } catch(Exception e) {
       // expecting exception here
+    } finally {
+      if (dn != null) {
+        dn.shutdown();
+      }
     }
-    if(dn != null)
-      dn.shutdown();
     assertNull("Data-node startup should have failed.", dn);
 
     // 2. Test "file:" schema and no schema (path-only). Both should work.
@@ -121,17 +123,21 @@ public class TestDatanodeConfig {
     // Can't increase the memlock limit past the maximum.
     assumeTrue(memlockLimit != Long.MAX_VALUE);
 
+    File dataDir = new File(BASE_DIR, "data").getCanonicalFile();
     Configuration conf = cluster.getConfiguration(0);
+    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY,
+      makeURI("file", null, fileAsURI(dataDir).getPath()));
     long prevLimit = conf.
         getLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
             DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_DEFAULT);
+    DataNode dn = null;
     try {
       // Try starting the DN with limit configured to the ulimit
       conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
           memlockLimit);
-      DataNode dn = null;
       dn = DataNode.createDataNode(new String[]{},  conf);
       dn.shutdown();
+      dn = null;
       // Try starting the DN with a limit > ulimit
       conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
           memlockLimit+1);
@@ -142,6 +148,9 @@ public class TestDatanodeConfig {
             "more than the datanode's available RLIMIT_MEMLOCK", e);
       }
     } finally {
+      if (dn != null) {
+        dn.shutdown();
+      }
       conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
           prevLimit);
     }
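
The core of the HDFS-6234 fix is that the data directory setting is now built from a proper file: URI rather than a raw platform path. On Windows, a path such as C:\build\test\data contains a drive colon and backslashes that break URI parsing, whereas java.io.File#toURI normalizes it to a well-formed /C:/build/test/data. The makeURI and fileAsURI calls in the patch are Hadoop test helpers; the standalone sketch below, which assumes nothing beyond the JDK, shows the same idea.

import java.io.File;
import java.net.URI;

public class FileUriDemo {
  public static void main(String[] args) {
    File dataDir = new File("build/test/data").getAbsoluteFile();
    // A raw Windows path does not form a valid URI: "file:C:\build\test\data"
    // fails to parse because of the drive colon and the backslashes.
    // File#toURI always yields a well-formed "file:" URI on any OS.
    URI uri = dataDir.toURI();
    System.out.println(uri);           // e.g. file:/C:/build/test/data on Windows
    System.out.println(uri.getPath()); // e.g. /C:/build/test/data
  }
}

The rest of the patch is the standard try/finally cleanup pattern: dn.shutdown() moves into a finally block, and dn is nulled after a successful shutdown, so the DataNode is released on every exit path, including when createDataNode throws.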

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java

@@ -140,7 +140,7 @@ public class TestFileJournalManager {
    */
   @Test
   public void testInprogressRecovery() throws IOException {
-    File f = new File(TestEditLog.TEST_DIR + "/filejournaltest0");
+    File f = new File(TestEditLog.TEST_DIR + "/inprogressrecovery");
     // abort after the 5th roll 
     NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
                                    5, new AbortSpec(5, 0));
@@ -256,7 +256,7 @@ public class TestFileJournalManager {
    */
   @Test 
   public void testReadFromStream() throws IOException {
-    File f = new File(TestEditLog.TEST_DIR + "/filejournaltest1");
+    File f = new File(TestEditLog.TEST_DIR + "/readfromstream");
     // abort after 10th roll
     NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
                                    10, new AbortSpec(10, 0));
@@ -283,7 +283,7 @@ public class TestFileJournalManager {
    */
   @Test
   public void testAskForTransactionsMidfile() throws IOException {
-    File f = new File(TestEditLog.TEST_DIR + "/filejournaltest2");
+    File f = new File(TestEditLog.TEST_DIR + "/askfortransactionsmidfile");
     NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 
                                    10);
     StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
@@ -308,7 +308,7 @@ public class TestFileJournalManager {
    */
   @Test
   public void testManyLogsWithGaps() throws IOException {
-    File f = new File(TestEditLog.TEST_DIR + "/filejournaltest3");
+    File f = new File(TestEditLog.TEST_DIR + "/manylogswithgaps");
     NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10);
     StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
 
@@ -342,7 +342,7 @@ public class TestFileJournalManager {
    */
   @Test
   public void testManyLogsWithCorruptInprogress() throws IOException {
-    File f = new File(TestEditLog.TEST_DIR + "/filejournaltest5");
+    File f = new File(TestEditLog.TEST_DIR + "/manylogswithcorruptinprogress");
     NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10, new AbortSpec(10, 0));
     StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
 
@@ -427,7 +427,7 @@ public class TestFileJournalManager {
   @Test
   public void testReadFromMiddleOfEditLog() throws CorruptionException,
       IOException {
-    File f = new File(TestEditLog.TEST_DIR + "/filejournaltest2");
+    File f = new File(TestEditLog.TEST_DIR + "/readfrommiddleofeditlog");
     NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 
                                    10);
     StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
@@ -450,7 +450,7 @@ public class TestFileJournalManager {
   @Test
   public void testExcludeInProgressStreams() throws CorruptionException,
       IOException {
-    File f = new File(TestEditLog.TEST_DIR + "/filejournaltest2");
+    File f = new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
     
     // Don't close the edit log once the files have been set up.
     NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 

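HDFS-6235 is purely a test-isolation change: several tests shared directory names (three of them reused filejournaltest2), and on Windows an open edit-log file stays locked, so a test that left a stream open could make a later test fail depending on execution order. Naming each directory after its test removes the collision. A minimal sketch of the pattern follows; the uniqueTestDir helper is hypothetical, standing in for the hand-picked per-test names used in the patch.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

public class PerTestDirDemo {
  // Hypothetical helper: one directory per test, derived from the test name,
  // so a file locked by one test can never collide with another test's files.
  static File uniqueTestDir(String testName) throws IOException {
    File dir = new File("build/test/data", testName);
    Files.createDirectories(dir.toPath());
    return dir;
  }

  public static void main(String[] args) throws IOException {
    System.out.println(uniqueTestDir("readfromstream").toURI());
    System.out.println(uniqueTestDir("excludeinprogressstreams").toURI());
  }
}
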
+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/YARN.apt.vm

@@ -72,6 +72,6 @@ Apache Hadoop NextGen MapReduce (YARN)
   monitoring for progress.
 
   MRV2 maintains <<API compatibility>> with previous stable release 
-  (hadoop-0.20.205).  This means that all Map-Reduce jobs should still run 
+  (hadoop-1.x).  This means that all Map-Reduce jobs should still run 
   unchanged on top of MRv2 with just a recompile.