
HDFS-6552. add DN storage to a BlockInfo will not replace the different storage from same DN. (Contributed by Amir Langer)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1603602 13f79535-47bb-0310-9956-ffa450edef68
Arpit Agarwal 11 years ago
parent
commit
52d18aa217

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -653,6 +653,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6527. Edit log corruption due to defered INode removal. (kihwal and
     jing9 via jing9)
 
+    HDFS-6552. add DN storage to a BlockInfo will not replace the different
+    storage from same DN. (Amir Langer via Arpit Agarwal)
+
   BREAKDOWN OF HDFS-2006 SUBTASKS AND RELATED JIRAS
 
     HDFS-6299. Protobuf for XAttr and client-side implementation. (Yi Liu via umamahesh)

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java

@@ -203,7 +203,7 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
       } else {
         // The block is on the DN but belongs to a different storage.
         // Update our state.
-        removeStorage(storage);
+        removeStorage(getStorageInfo(idx));
         added = false;      // Just updating storage. Return false.
       }
     }
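
For readers skimming the one-line change above: before this patch, addStorage() called removeStorage(storage) with the incoming storage, which is not yet in the block's storage list, so the call was a no-op and the stale entry for the same datanode was never unlinked. Below is a minimal sketch of how the surrounding method reads with the fix applied; everything outside the changed line is reconstructed from the hunk context, so helper names such as findDatanode, ensureCapacity and setStorageInfo are assumptions, not a verbatim quote of BlockInfo.java.

// Sketch (reconstruction, not verbatim) of BlockInfo#addStorage after this change.
boolean addStorage(DatanodeStorageInfo storage) {
  boolean added = true;
  // Assumed helper: index of an existing entry for this storage's datanode, -1 if none.
  int idx = findDatanode(storage.getDatanodeDescriptor());
  if (idx >= 0) {
    if (getStorageInfo(idx) == storage) {
      // Exactly this storage is already recorded; nothing to do.
      return false;
    } else {
      // The block is on the DN but belongs to a different storage.
      // Update our state: unlink the stale storage at idx. The old code passed the
      // new 'storage' here, which silently did nothing because it is not in the list yet.
      removeStorage(getStorageInfo(idx));
      added = false;      // Just updating storage. Return false.
    }
  }
  // Assumed helpers: record the storage in the first free slot of the storage list.
  int lastNode = ensureCapacity(1);
  setStorageInfo(lastNode, storage);
  setNext(lastNode, null);
  setPrevious(lastNode, null);
  return added;
}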

+ 30 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockInfo.java

@@ -29,6 +29,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.junit.Assert;
 import org.junit.Test;
 
 /**
@@ -42,6 +44,34 @@ public class TestBlockInfo {
   private static final Log LOG = LogFactory
       .getLog("org.apache.hadoop.hdfs.TestBlockInfo");
 
+
+  @Test
+  public void testAddStorage() throws Exception {
+    BlockInfo blockInfo = new BlockInfo(3);
+
+    final DatanodeStorageInfo storage = DFSTestUtil.createDatanodeStorageInfo("storageID", "127.0.0.1");
+
+    boolean added = blockInfo.addStorage(storage);
+
+    Assert.assertTrue(added);
+    Assert.assertEquals(storage, blockInfo.getStorageInfo(0));
+  }
+
+
+  @Test
+  public void testReplaceStorageIfDifferentOneAlreadyExistedFromSameDataNode() throws Exception {
+    BlockInfo blockInfo = new BlockInfo(3);
+
+    final DatanodeStorageInfo storage1 = DFSTestUtil.createDatanodeStorageInfo("storageID1", "127.0.0.1");
+    final DatanodeStorageInfo storage2 = new DatanodeStorageInfo(storage1.getDatanodeDescriptor(), new DatanodeStorage("storageID2"));
+
+    blockInfo.addStorage(storage1);
+    boolean added = blockInfo.addStorage(storage2);
+
+    Assert.assertFalse(added);
+    Assert.assertEquals(storage2, blockInfo.getStorageInfo(0));
+  }
+
   @Test
   public void testBlockListMoveToHead() throws Exception {
     LOG.info("BlockInfo moveToHead tests...");
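
The second new test above is the regression check for this JIRA: with the old removeStorage(storage) call, storage1 was never unlinked, so a block could appear to live on two storages of the same datanode; with the fix, addStorage(storage2) returns false and storage2 takes over slot 0. A rough way to run just this test class from a source checkout (standard Maven/Surefire test selection; exact module flags may vary by branch):

mvn -pl hadoop-hdfs-project/hadoop-hdfs -Dtest=TestBlockInfo test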