
HDFS-604. Merge -r 813631:814221 from trunk to the append branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-265@814244 13f79535-47bb-0310-9956-ffa450edef68
Konstantin Shvachko 16 years ago
parent
commit
711e52c2ad

+ 12 - 0
CHANGES.txt

@@ -54,6 +54,9 @@ Trunk (unreleased changes)
     FileNotFoundException from FileSystem::listStatus rather than returning
     null. (Jakob Homan via cdouglas)
 
+    HDFS-602. DistributedFileSystem mkdirs throws FileAlreadyExistsException
+    instead of FileNotFoundException. (Boris Shkolnik via suresh)
+
   NEW FEATURES
 
     HDFS-436. Introduce AspectJ framework for HDFS code and tests.
@@ -269,6 +272,15 @@ Trunk (unreleased changes)
     HDFS-605. Do not run fault injection tests in the run-test-hdfs-with-mr
     target.  (Konstantin Boudnik via szetszwo)
 
+    HDFS-606. Fix ConcurrentModificationException in invalidateCorruptReplicas()
+    (shv)
+
+    HDFS-601. TestBlockReport obtains data directories directly from
+    MiniHDFSCluster. (Konstantin Boudnik via shv)
+
+    HDFS-614. TestDatanodeBlockScanner obtains data directories directly from
+    MiniHDFSCluster. (shv)
+
 Release 0.20.1 - Unreleased
 
   IMPROVEMENTS

BIN
lib/hadoop-mapred-0.21.0-dev.jar


BIN
lib/hadoop-mapred-examples-0.21.0-dev.jar


BIN
lib/hadoop-mapred-test-0.21.0-dev.jar


+ 3 - 1
src/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -65,6 +65,7 @@ import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FSOutputSummer;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -975,7 +976,8 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     } catch(RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
                                      NSQuotaExceededException.class,
-                                     DSQuotaExceededException.class);
+                                     DSQuotaExceededException.class,
+                                     FileAlreadyExistsException.class);
     }
   }
 

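With FileAlreadyExistsException added to the classes unwrapped from the RemoteException above, an HDFS client calling mkdirs() can catch the specific failure instead of a generic wrapped exception. A minimal sketch of the caller's side, assuming a hypothetical path whose parent component is an existing file:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MkdirsClientSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try {
      // "/data/report.txt" is assumed to be an existing file, so this mkdirs must fail.
      fs.mkdirs(new Path("/data/report.txt/2009"));
    } catch (FileAlreadyExistsException e) {
      // Before HDFS-602 this case surfaced as a misleading FileNotFoundException.
      System.err.println("A file is in the way: " + e.getMessage());
    } finally {
      fs.close();
    }
  }
}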
+ 4 - 2
src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java

@@ -1136,8 +1136,10 @@ public class BlockManager {
     boolean gotException = false;
     if (nodes == null)
       return;
-    for (Iterator<DatanodeDescriptor> it = nodes.iterator(); it.hasNext(); ) {
-      DatanodeDescriptor node = it.next();
+    // make a copy of the array of nodes in order to avoid
+    // ConcurrentModificationException, when the block is removed from the node
+    DatanodeDescriptor[] nodesCopy = nodes.toArray(new DatanodeDescriptor[0]);
+    for (DatanodeDescriptor node : nodesCopy) {
       try {
         invalidateBlock(blk, node);
       } catch (IOException e) {

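The comment added in the hunk above captures the HDFS-606 fix: invalidateBlock() removes the block from the node and thereby mutates the collection being iterated, so the loop must run over a snapshot. A self-contained sketch of the same copy-before-iterate pattern, with plain strings standing in for DatanodeDescriptor:

import java.util.ArrayList;
import java.util.List;

public class SnapshotIterationSketch {
  public static void main(String[] args) {
    List<String> nodes = new ArrayList<String>();
    nodes.add("datanode-1");
    nodes.add("datanode-2");
    nodes.add("datanode-3");

    // Removing from the live list inside a for-each over that list risks
    // ConcurrentModificationException; iterating over a copy does not.
    String[] snapshot = nodes.toArray(new String[0]);
    for (String node : snapshot) {
      nodes.remove(node); // mutating the original collection is now harmless
    }
    System.out.println("nodes left: " + nodes.size()); // prints 0
  }
}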
+ 17 - 10
src/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -17,23 +17,30 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.*;
+import java.io.Closeable;
+import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.net.URI;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.permission.*;
-import org.apache.hadoop.metrics.MetricsRecord;
-import org.apache.hadoop.metrics.MetricsUtil;
-import org.apache.hadoop.metrics.MetricsContext;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.BlockUCState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.metrics.MetricsContext;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.MetricsUtil;
 
 /*************************************************
  * FSDirectory stores the filesystem directory state.
@@ -957,7 +964,7 @@ class FSDirectory implements Closeable {
    */
   boolean mkdirs(String src, PermissionStatus permissions,
       boolean inheritPermission, long now)
-      throws FileNotFoundException, QuotaExceededException {
+      throws FileAlreadyExistsException, QuotaExceededException {
     src = normalizePath(src);
     String[] names = INode.getPathNames(src);
     byte[][] components = INode.getPathComponents(names);
@@ -972,7 +979,7 @@ class FSDirectory implements Closeable {
       for(; i < inodes.length && inodes[i] != null; i++) {
         pathbuilder.append(Path.SEPARATOR + names[i]);
         if (!inodes[i].isDirectory()) {
-          throw new FileNotFoundException("Parent path is not a directory: "
+          throw new FileAlreadyExistsException("Parent path is not a directory: "
               + pathbuilder);
         }
       }

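The FSDirectory change above makes mkdirs report an existing non-directory ancestor as FileAlreadyExistsException rather than FileNotFoundException. A standalone sketch of the same rule, expressed against the local filesystem rather than the namespace tree; the class and path names are made up:

import java.io.File;

import org.apache.hadoop.fs.FileAlreadyExistsException;

public class AncestorCheckSketch {
  // Throws if any existing ancestor of the requested directory is not itself a directory.
  static void checkAncestors(File target) throws FileAlreadyExistsException {
    for (File p = target.getParentFile(); p != null; p = p.getParentFile()) {
      if (p.exists() && !p.isDirectory()) {
        // Something already occupies this path component, so "not found" would be misleading.
        throw new FileAlreadyExistsException("Parent path is not a directory: " + p);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    checkAncestors(new File("/tmp/some/new/dir")); // hypothetical path
    System.out.println("no conflicting ancestors");
  }
}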
+ 11 - 10
src/test/hdfs/org/apache/hadoop/hdfs/TestBlockReport.java

@@ -17,6 +17,16 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.File;
+import java.io.FilenameFilter;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
@@ -30,18 +40,9 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.log4j.Level;
 import org.junit.After;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.File;
-import java.io.FilenameFilter;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
 /**
  * This test simulates a variety of situations when blocks are being intentionally
  * corrupted, unexpectedly modified, and so on before a block report is happening
@@ -155,7 +156,7 @@ public class TestBlockReport {
         (long)AppendTestUtil.FILE_SIZE, REPL_FACTOR, rand.nextLong());
 
     // mock around with newly created blocks and delete some
-    String testDataDirectory = System.getProperty("test.build.data");
+    String testDataDirectory = cluster.getDataDirectory();
 
     File dataDir = new File(testDataDirectory);
     assertTrue(dataDir.isDirectory());

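The TestBlockReport change above (HDFS-601) asks the running cluster for its data directory instead of rebuilding the path from the test.build.data system property. A rough sketch of that pattern; the MiniDFSCluster constructor arguments are assumed from the test utilities of this era, and error handling is omitted:

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class DataDirSketch {
  public static void main(String[] args) throws Exception {
    // One datanode, freshly formatted, default racks.
    MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 1, true, null);
    try {
      // The cluster, not a system property, is the authority on where its data lives.
      File dataDir = new File(cluster.getDataDirectory());
      System.out.println("data dir exists: " + dataDir.isDirectory());
    } finally {
      cluster.shutdown();
    }
  }
}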
+ 7 - 7
src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -131,7 +131,7 @@ public class TestDatanodeBlockScanner extends TestCase {
 
   public static boolean corruptReplica(String blockName, int replica) throws IOException {
     Random random = new Random();
-    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
+    File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
     boolean corrupted = false;
     for (int i=replica*2; i<replica*2+2; i++) {
       File blockFile = new File(baseDir, "data" + (i+1) + 
@@ -183,7 +183,7 @@ public class TestDatanodeBlockScanner extends TestCase {
     assertTrue(blocks.get(0).isCorrupt() == false);
 
     // Corrupt random replica of block 
-    corruptReplica(block, rand);
+    assertTrue(corruptReplica(block, rand));
 
     // Restart the datanode hoping the corrupt block to be reported
     cluster.restartDataNode(rand);
@@ -203,9 +203,9 @@ public class TestDatanodeBlockScanner extends TestCase {
   
     // Corrupt all replicas. Now, block should be marked as corrupt
     // and we should get all the replicas 
-    corruptReplica(block, 0);
-    corruptReplica(block, 1);
-    corruptReplica(block, 2);
+    assertTrue(corruptReplica(block, 0));
+    assertTrue(corruptReplica(block, 1));
+    assertTrue(corruptReplica(block, 2));
 
     // Read the file to trigger reportBadBlocks by client
     try {
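Besides switching to MiniDFSCluster.getBaseDirectory(), the hunks above start asserting the boolean returned by corruptReplica(), so a run that silently fails to corrupt any replica now fails the test at the point of the problem. An illustrative-only JUnit 3 fragment of that habit, with a hypothetical helper standing in for corruptReplica():

import junit.framework.TestCase;

public class AssertHelperResultSketch extends TestCase {
  // Hypothetical stand-in for corruptReplica(): reports via its return value whether it did any work.
  private boolean corruptSomething() {
    return true; // pretend a replica was found and corrupted
  }

  public void testHelperResultIsChecked() {
    // Without this assert, the test would keep running against uncorrupted data
    // and fail later with a far less obvious message.
    assertTrue("expected at least one replica to be corrupted", corruptSomething());
  }
}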
@@ -410,7 +410,7 @@ public class TestDatanodeBlockScanner extends TestCase {
    * Change the length of a block at datanode dnIndex
    */
   static boolean changeReplicaLength(String blockName, int dnIndex, int lenDelta) throws IOException {
-    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
+    File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
     for (int i=dnIndex*2; i<dnIndex*2+2; i++) {
       File blockFile = new File(baseDir, "data" + (i+1) + 
           MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
@@ -426,7 +426,7 @@ public class TestDatanodeBlockScanner extends TestCase {
   
   private static void waitForBlockDeleted(String blockName, int dnIndex) 
   throws IOException, InterruptedException {
-    File baseDir = new File(System.getProperty("test.build.data"), "dfs/data");
+    File baseDir = new File(MiniDFSCluster.getBaseDirectory(), "data");
     File blockFile1 = new File(baseDir, "data" + (2*dnIndex+1) + 
         MiniDFSCluster.FINALIZED_DIR_NAME + blockName);
     File blockFile2 = new File(baseDir, "data" + (2*dnIndex+2) +