소스 검색

HDFS-9083. Replication violates block placement policy (Rushabh Shah)

Sangjin Lee 9 년 전
부모
커밋
b9a6f9aa16

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -18,6 +18,8 @@ Release 2.6.3 - UNRELEASED
     HDFS-9431. DistributedFileSystem#concat fails if the target path is
     relative. (Kazuho Fujii via aajisaka)
 
+    HDFS-9083. Replication violates block placement policy (Rushabh Shah)
+
 Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES

+ 0 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -225,9 +225,6 @@ public class BlockManager {
 
   final float blocksInvalidateWorkPct;
   final int blocksReplWorkMultiplier;
-
-  /** variable to enable check for enough racks */
-  final boolean shouldCheckForEnoughRacks;
   
   // whether or not to issue block encryption keys.
   final boolean encryptDataTransfer;
@@ -325,9 +322,6 @@ public class BlockManager {
         conf.getInt(
             DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
             DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
-    this.shouldCheckForEnoughRacks =
-        conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
-            ? false : true;
 
     this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
     this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
@@ -351,7 +345,6 @@ public class BlockManager {
     LOG.info("maxReplication             = " + maxReplication);
     LOG.info("minReplication             = " + minReplication);
     LOG.info("maxReplicationStreams      = " + maxReplicationStreams);
-    LOG.info("shouldCheckForEnoughRacks  = " + shouldCheckForEnoughRacks);
     LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
     LOG.info("encryptDataTransfer        = " + encryptDataTransfer);
     LOG.info("maxNumBlocksToLog          = " + maxNumBlocksToLog);
@@ -3548,9 +3541,6 @@ public class BlockManager {
   }
 
   boolean blockHasEnoughRacks(Block b) {
-    if (!this.shouldCheckForEnoughRacks) {
-      return true;
-    }
     boolean enoughRacks = false;;
     Collection<DatanodeDescriptor> corruptNodes = 
                                   corruptReplicas.getNodes(b);

+ 24 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

@@ -827,4 +827,28 @@ public class TestBlockManager {
     Assert.assertFalse(BlockManager.useDelHint(true, delHint, null,
         moreThan1Racks, excessTypes));
   }
+
+  /**
+   * {@link BlockManager#blockHasEnoughRacks(Block)} should return false
+   * if all the replicas are on the same rack and shouldn't be dependent on
+   * CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY
+   * @throws Exception
+   */
+  @Test
+  public void testAllReplicasOnSameRack() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.unset(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
+    fsn = Mockito.mock(FSNamesystem.class);
+    Mockito.doReturn(true).when(fsn).hasWriteLock();
+    Mockito.doReturn(true).when(fsn).hasReadLock();
+    bm = new BlockManager(fsn, fsn, conf);
+    // Add nodes on two racks
+    addNodes(nodes);
+    // Added a new block in blocksMap and all the replicas are on the same rack
+    BlockInfo blockInfo = addBlockOnNodes(1, rackA);
+    // Since the network topology is multi-rack, blockHasEnoughRacks
+    // should return false.
+    assertFalse("Replicas for block is not stored on enough racks",
+        bm.blockHasEnoughRacks(blockInfo));
+  }
 }