
HDFS-5457. Fix TestDatanodeRegistration, TestFsck and TestAddBlockRetry. (Contributed by szetszwo)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2832@1538794 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 11 years ago
parent
commit
d2b7b6589d

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-2832.txt

@@ -70,3 +70,6 @@ IMPROVEMENTS:
 
     HDFS-5455. NN should update storageMap on first heartbeat. (Arpit Agarwal)
 
+    HDFS-5457. Fix TestDatanodeRegistration, TestFsck and TestAddBlockRetry.
+    (Contributed by szetszwo)
+

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -385,7 +385,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
     @Override
     public boolean hasNext() {
       update();
-      return iterators.get(index).hasNext();
+      return !iterators.isEmpty() && iterators.get(index).hasNext();
     }
 
     @Override

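The isEmpty() guard above protects hasNext() when a DataNode has no storages yet, so its list of per-storage block iterators is empty; previously iterators.get(index) would throw IndexOutOfBoundsException. A minimal stand-alone sketch of the guarded composite-iterator pattern (illustrative class, not the actual DatanodeDescriptor inner iterator):

    import java.util.Iterator;
    import java.util.List;

    // Illustrative sketch: iterate several sub-iterators in sequence,
    // tolerating an empty list of sub-iterators.
    class MultiIterator<T> implements Iterator<T> {
      private final List<Iterator<T>> iterators;
      private int index = 0;

      MultiIterator(List<Iterator<T>> iterators) {
        this.iterators = iterators;
      }

      // Skip past exhausted sub-iterators.
      private void update() {
        while (index < iterators.size() - 1 && !iterators.get(index).hasNext()) {
          index++;
        }
      }

      @Override
      public boolean hasNext() {
        update();
        // Without the isEmpty() check, an empty iterator list would make
        // iterators.get(index) throw IndexOutOfBoundsException.
        return !iterators.isEmpty() && iterators.get(index).hasNext();
      }

      @Override
      public T next() {
        update();
        return iterators.get(index).next();
      }
    }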
+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -5774,7 +5774,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
         DatanodeInfo[] nodes = blocks[i].getLocations();
         String[] storageIDs = blocks[i].getStorageIDs();
         for (int j = 0; j < nodes.length; j++) {
-          blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j], storageIDs[j], 
+          blockManager.findAndMarkBlockAsCorrupt(blk, nodes[j],
+              storageIDs == null ? null: storageIDs[j], 
               "client machine reported it");
         }
       }

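The added null check matters because a client's reportBadBlocks() call may not carry storage IDs, in which case getStorageIDs() returns null and indexing storageIDs[j] would throw a NullPointerException. A minimal sketch of the null-tolerant per-replica loop (illustrative names, not the real FSNamesystem/BlockManager types):

    // Illustrative sketch: mark each reported replica corrupt, passing a
    // null storage ID when the report did not include one.
    class CorruptReportSketch {
      static void markCorrupt(String blk, String[] nodes, String[] storageIDs) {
        for (int j = 0; j < nodes.length; j++) {
          String storageID = storageIDs == null ? null : storageIDs[j];
          System.out.println("mark " + blk + " corrupt on " + nodes[j]
              + ", storage=" + storageID);
        }
      }

      public static void main(String[] args) {
        // A client-side report typically has locations but may lack storage IDs.
        markCorrupt("blk_1073741825", new String[] {"dn1:50010", "dn2:50010"}, null);
      }
    }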
+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.net.Node;
@@ -99,13 +100,13 @@ public class TestAddBlockRetry {
     bmField.setAccessible(true);
     bmField.set(ns, spyBM);
 
-    doAnswer(new Answer<DatanodeDescriptor[]>() {
+    doAnswer(new Answer<DatanodeStorageInfo[]>() {
       @Override
-      public DatanodeDescriptor[] answer(InvocationOnMock invocation)
+      public DatanodeStorageInfo[] answer(InvocationOnMock invocation)
           throws Throwable {
         LOG.info("chooseTarget for " + src);
-        DatanodeDescriptor[] ret =
-            (DatanodeDescriptor[]) invocation.callRealMethod();
+        DatanodeStorageInfo[] ret =
+            (DatanodeStorageInfo[]) invocation.callRealMethod();
         count++;
         if(count == 1) { // run second addBlock()
           LOG.info("Starting second addBlock for " + src);