
HDFS-5994. Fix TestDataNodeRollingUpgrade. Contributed by Arpit Agarwal

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-5535@1570734 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 11 years ago
commit 3f7852bd27

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES_HDFS-5535.txt

@@ -71,3 +71,5 @@ HDFS-5535 subtasks:
 
     HDFS-5992. Fix NPE in MD5FileUtils and update editsStored for
     TestOfflineEditsViewer.  (szetszwo)
+
+    HDFS-5994. Fix TestDataNodeRollingUpgrade.  (Arpit Agarwal via szetszwo)

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -392,11 +392,12 @@ public class BlockPoolSliceStorage extends Storage {
       if (child.isDirectory()) {
         // Recurse to process subdirectories.
         filesRestored += restoreBlockFilesFromTrash(child);
+        continue;
       }
 
       if (restoreDirectory == null) {
         restoreDirectory = new File(getRestoreDirectory(child));
-        if (!restoreDirectory.mkdirs()) {
+        if (!restoreDirectory.exists() && !restoreDirectory.mkdirs()) {
           throw new IOException("Failed to create directory " + restoreDirectory);
         }
       }
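
The two changes above fix separate problems in the trash-restore loop: the added continue keeps a subdirectory entry from falling through into the file-restore logic after recursion, and the exists() guard accounts for java.io.File.mkdirs() returning false both on failure and when the directory already exists. A minimal sketch of that guarded-creation pattern (the ensureDirectory helper name is illustrative, not part of the patch):

import java.io.File;
import java.io.IOException;

class DirUtil {
  // mkdirs() alone is not a reliable success check: it returns false both
  // when creation fails and when the directory is already present, so an
  // existing directory must not be treated as an error.
  static void ensureDirectory(File dir) throws IOException {
    if (!dir.exists() && !dir.mkdirs()) {
      throw new IOException("Failed to create directory " + dir);
    }
  }
}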

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetAsyncDiskService.java

@@ -196,9 +196,8 @@ class FsDatasetAsyncDiskService {
     }
 
     private boolean moveFiles() {
-      File newBlockFile = new File(trashDirectory, blockFile.getName());
-      File newMetaFile = new File(trashDirectory, metaFile.getName());
-      if (!new File(trashDirectory).mkdirs()) {
+      File trashDirFile = new File(trashDirectory);
+      if (!trashDirFile.exists() && !trashDirFile.mkdirs()) {
         LOG.error("Failed to create trash directory " + trashDirectory);
         return false;
       }
@@ -207,6 +206,9 @@ class FsDatasetAsyncDiskService {
         LOG.debug("Moving files " + blockFile.getName() + " and " +
             metaFile.getName() + " to trash.");
       }
+
+      File newBlockFile = new File(trashDirectory, blockFile.getName());
+      File newMetaFile = new File(trashDirectory, metaFile.getName());
       return (blockFile.renameTo(newBlockFile) &&
               metaFile.renameTo(newMetaFile));
     }
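
The same exists() guard is applied here before creating the trash directory, which may already be present when more than one block is deleted into it, and the target File objects are simply constructed after that check. The patch keeps File.renameTo(), which only signals failure through its boolean return value; the sketch below is a hedged alternative using java.nio.file.Files.move() (Java 7 NIO.2) to surface the underlying error, shown for comparison only and not what the patch does:

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

class TrashMove {
  // Move a block file and its meta file into the trash directory.
  // Files.move() throws an IOException carrying the OS-level cause on
  // failure, unlike File.renameTo(), which just returns false.
  static void moveToTrash(File blockFile, File metaFile, File trashDir)
      throws IOException {
    if (!trashDir.exists() && !trashDir.mkdirs()) {
      throw new IOException("Failed to create trash directory " + trashDir);
    }
    Path target = trashDir.toPath();
    Files.move(blockFile.toPath(), target.resolve(blockFile.getName()),
        StandardCopyOption.REPLACE_EXISTING);
    Files.move(metaFile.toPath(), target.resolve(metaFile.getName()),
        StandardCopyOption.REPLACE_EXISTING);
  }
}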

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java

@@ -46,7 +46,8 @@ public class TestDataNodeRollingUpgrade {
   private static final Log LOG = LogFactory.getLog(TestDataNodeRollingUpgrade.class);
 
   private static final short REPL_FACTOR = 1;
-  private static final long FILE_SIZE = 1024L;
+  private static final int BLOCK_SIZE = 1024 * 1024;
+  private static final long FILE_SIZE = BLOCK_SIZE * 4;
   private static final long SEED = 0x1BADF00DL;
 
   Configuration conf;
@@ -139,7 +140,7 @@ public class TestDataNodeRollingUpgrade {
       Path testFile1 = new Path("/TestDataNodeRollingUpgrade1.dat");
 
       // Create files in DFS.
-      DFSTestUtil.createFile(fs, testFile1, FILE_SIZE, REPL_FACTOR, SEED);
+      DFSTestUtil.createFile(fs, testFile1, BLOCK_SIZE, BLOCK_SIZE, FILE_SIZE, REPL_FACTOR, SEED);
       String fileContents1 = DFSTestUtil.readFile(fs, testFile1);
 
       startRollingUpgrade();
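
Instead of a single 1 KB file, the test constants now describe a 1 MB block size and a 4 MB file, so the rolling-upgrade trash and restore paths can be exercised against several block files per datanode rather than one. A hypothetical helper (not part of this patch) that a test could use to confirm a file really spans the expected number of blocks:

import java.io.IOException;

import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import static org.junit.Assert.assertEquals;

class BlockCountCheck {
  // Assert that a file was split into the expected number of blocks,
  // e.g. FILE_SIZE / BLOCK_SIZE when the block size divides the file
  // length evenly.
  static void assertBlockCount(FileSystem fs, Path file, int expectedBlocks)
      throws IOException {
    FileStatus status = fs.getFileStatus(file);
    BlockLocation[] blocks =
        fs.getFileBlockLocations(status, 0, status.getLen());
    assertEquals(expectedBlocks, blocks.length);
  }
}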