View source code

HDFS-2245. Fix a NullPointerException in BlockManager.chooseTarget(..).

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1156490 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 13 years ago
parent
commit
6a7c0306bd

+ 3 - 0
hdfs/CHANGES.txt

@@ -951,6 +951,9 @@ Trunk (unreleased changes)
     HDFS-2196. Make ant build system work with hadoop-common JAR generated
     by Maven. (Alejandro Abdelnur via tomwhite)
 
+    HDFS-2245. Fix a NullPointerException in BlockManager.chooseTarget(..).
+    (szetszwo)
+
   BREAKDOWN OF HDFS-1073 SUBTASKS
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.

+ 7 - 6
hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1221,12 +1221,13 @@ public class BlockManager {
     final DatanodeDescriptor targets[] = blockplacement.chooseTarget(
         src, numOfReplicas, client, excludedNodes, blocksize);
     if (targets.length < minReplication) {
-      throw new IOException("File " + src + " could only be replicated to " +
-                            targets.length + " nodes, instead of " +
-                            minReplication + ". There are "
-                            + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
-                            + " datanode(s) running but "+excludedNodes.size() +
-                            " node(s) are excluded in this operation.");
+      throw new IOException("File " + src + " could only be replicated to "
+          + targets.length + " nodes instead of minReplication (="
+          + minReplication + ").  There are "
+          + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
+          + " datanode(s) running and "
+          + (excludedNodes == null? "no": excludedNodes.size())
+          + " node(s) are excluded in this operation.");
     }
     return targets;
   }

+ 30 - 0
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -404,6 +404,36 @@ public class TestFileCreation extends junit.framework.TestCase {
     }
   }
 
+  /** test addBlock(..) when replication<min and excludeNodes==null. */
+  public void testFileCreationError3() throws IOException {
+    System.out.println("testFileCreationError3 start");
+    Configuration conf = new HdfsConfiguration();
+    // create cluster
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    DistributedFileSystem dfs = null;
+    try {
+      cluster.waitActive();
+      dfs = (DistributedFileSystem)cluster.getFileSystem();
+      DFSClient client = dfs.dfs;
+
+      // create a new file.
+      final Path f = new Path("/foo.txt");
+      createFile(dfs, f, 3);
+      try {
+        cluster.getNameNode().addBlock(f.toString(), 
+            client.clientName, null, null);
+        fail();
+      } catch(IOException ioe) {
+        FileSystem.LOG.info("GOOD!", ioe);
+      }
+
+      System.out.println("testFileCreationError3 successful");
+    } finally {
+      IOUtils.closeStream(dfs);
+      cluster.shutdown();
+    }
+  }
+
   /**
    * Test that file leases are persisted across namenode restarts.
    * This test is currently not triggered because more HDFS work is