
HADOOP-4598. Setrep command skips under-replicated blocks. Contributed by Hairong Kuang.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@719393 13f79535-47bb-0310-9956-ffa450edef68
Hairong Kuang 16 years ago
parent
commit
18c33482e8
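
For context, '-setrep' (e.g. hadoop fs -setrep -w 3 /testFile) is FsShell's front end for FileSystem.setReplication, which on HDFS ends up in the FSNamesystem.setReplication code patched below. A minimal programmatic equivalent, sketched here with an illustrative path and target factor (the class name SetRepExample is not part of this commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class SetRepExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Ask the NameNode for a new target replication factor; the actual block
    // copies happen asynchronously, driven by the under-replication queue
    // this commit touches.
    boolean accepted = fs.setReplication(new Path("/testFile"), (short) 3);
    System.out.println("setReplication accepted: " + accepted);
  }
}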

+ 2 - 0
CHANGES.txt

@@ -183,6 +183,8 @@ Trunk (unreleased changes)
 
     HADOOP-4691. Correct a link in the javadoc of IndexedSortable. (szetszwo)
 
+    HADOOP-4598. '-setrep' command skips under-replicated blocks. (hairong)
+
 Release 0.19.0 - 2008-11-18
 
   INCOMPATIBLE CHANGES

+ 4 - 3
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -887,8 +887,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
       return true;
 
     // update needReplication priority queues
-    LOG.info("Increasing replication for file " + src 
-             + ". New replication is " + replication);
     for(int idx = 0; idx < fileBlocks.length; idx++)
       updateNeededReplications(fileBlocks[idx], 0, replication-oldRepl);
       
@@ -898,6 +896,9 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
                + ". New replication is " + replication);
       for(int idx = 0; idx < fileBlocks.length; idx++)
         processOverReplicatedBlock(fileBlocks[idx], replication, null, null);
+    } else { // replication factor is increased
+      LOG.info("Increasing replication for file " + src 
+          + ". New replication is " + replication);
     }
     return true;
   }
@@ -1436,7 +1437,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
    * @param b block
    * @param n datanode
    */
-  private void addToInvalidates(Block b, DatanodeInfo n) {
+  void addToInvalidates(Block b, DatanodeInfo n) {
     addToInvalidatesNoLog(b, n);
     NameNode.stateChangeLog.info("BLOCK* NameSystem.addToInvalidates: "
         + b.getBlockName() + " is added to invalidSet of " + n.getName());
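
Taken together, the hunks above move the "Increasing replication" log into the branch where the factor actually increases, and relax addToInvalidates from private to package-private so the new test added in this commit can call it directly. A rough sketch of the resulting setReplication flow (not the verbatim method; checks and context the diff does not show are summarized in comments):

// sketch of FSNamesystem.setReplication after this change (simplified)
// ... argument checks and the replication-factor update itself omitted ...

// update needReplication priority queues for every block of the file
for (int idx = 0; idx < fileBlocks.length; idx++)
  updateNeededReplications(fileBlocks[idx], 0, replication - oldRepl);

if (oldRepl > replication) {    // replication factor is decreased
  // existing LOG.info(... + ". New replication is " + replication) from the context lines
  for (int idx = 0; idx < fileBlocks.length; idx++)
    processOverReplicatedBlock(fileBlocks[idx], replication, null, null);
} else {                        // replication factor is increased
  LOG.info("Increasing replication for file " + src
      + ". New replication is " + replication);
}
return true;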

+ 1 - 2
src/hdfs/org/apache/hadoop/hdfs/server/namenode/UnderReplicatedBlocks.java

@@ -171,8 +171,7 @@ class UnderReplicatedBlocks implements Iterable<Block> {
     if(oldPri != LEVEL && oldPri != curPri) {
       remove(block, oldPri);
     }
-    if(curPri != LEVEL && oldPri != curPri 
-        && priorityQueues.get(curPri).add(block)) {
+    if(curPri != LEVEL && priorityQueues.get(curPri).add(block)) {
       NameNode.stateChangeLog.debug(
                                     "BLOCK* NameSystem.UnderReplicationBlock.update:"
                                     + block
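
This hunk is the functional core of the fix: previously a block whose priority level did not change was never (re)inserted, so a block that was under-replicated yet missing from the queue (the situation the new test below constructs) could stay unscheduled; now the insert is attempted whenever curPri is valid, and the return value of the set's add() keeps the operation idempotent and gates the debug log. A standalone illustration of that idempotent-add pattern (plain java.util, not Hadoop code):

import java.util.TreeSet;

public class IdempotentAddDemo {
  public static void main(String[] args) {
    TreeSet<String> queue = new TreeSet<String>();
    // add() reports whether the element was actually inserted, so calling it
    // unconditionally neither duplicates entries nor double-logs.
    System.out.println(queue.add("blk_1"));  // true  -> block (re)enters the queue
    System.out.println(queue.add("blk_1"));  // false -> already present, nothing to do
  }
}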

+ 44 - 0
src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java

@@ -0,0 +1,44 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.Block;
+
+import junit.framework.TestCase;
+
+public class TestUnderReplicatedBlocks extends TestCase {
+  public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
+    Configuration conf = new Configuration();
+    final short REPLICATION_FACTOR = 2;
+    final String FILE_NAME = "/testFile";
+    final Path FILE_PATH = new Path(FILE_NAME);
+    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION_FACTOR+1, true, null);
+    try {
+      // create a file with one block with a replication factor of 2
+      final FileSystem fs = cluster.getFileSystem();
+      DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
+      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
+      
+      // remove one replica from the blocksMap so block becomes under-replicated
+      // but the block does not get put into the under-replicated blocks queue
+      FSNamesystem namesystem = cluster.getNameNode().namesystem;
+      Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
+      DatanodeDescriptor dn = namesystem.blocksMap.nodeIterator(b).next();
+      namesystem.addToInvalidates(b, dn);
+      namesystem.blocksMap.removeNode(b, dn);
+      
+      // increment this file's replication factor
+      FsShell shell = new FsShell(conf);
+      assertEquals(0, shell.run(new String[]{
+          "-setrep", "-w", Integer.toString(1+REPLICATION_FACTOR), FILE_NAME}));
+    } finally {
+      cluster.shutdown();
+    }
+    
+  }
+
+}