HDFS-5936. MiniDFSCluster does not clean data left behind by SecondaryNameNode. Contributed by Binglin Chang.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1572150 13f79535-47bb-0310-9956-ffa450edef68
Chris Nauroth, 11 years ago
commit 5f9cdbd24f

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -631,6 +631,9 @@ Release 2.4.0 - UNRELEASED
     HDFS-6008. Namenode dead node link is giving HTTP error 500.
     (Benoy Antony via cnauroth)
 
+    HDFS-5936. MiniDFSCluster does not clean data left behind by
+    SecondaryNameNode. (Binglin Chang via cnauroth)
+
 Release 2.3.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 9 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -79,6 +79,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataStorage;
@@ -813,6 +814,14 @@ public class MiniDFSCluster {
               throw new IOException("Could not fully delete " + nameDir);
             }
           }
+          Collection<URI> checkpointDirs = Util.stringCollectionAsURIs(conf
+              .getTrimmedStringCollection(DFS_NAMENODE_CHECKPOINT_DIR_KEY));
+          for (URI checkpointDirUri : checkpointDirs) {
+            File checkpointDir = new File(checkpointDirUri);
+            if (checkpointDir.exists() && !FileUtil.fullyDelete(checkpointDir)) {
+              throw new IOException("Could not fully delete " + checkpointDir);
+            }
+          }
         }
         
         boolean formatThisOne = format;
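
The hunk above makes a formatted MiniDFSCluster delete any stale checkpoint directories (the SecondaryNameNode's storage) in addition to the name directories, so state left behind by a previous run cannot leak into a new test. Below is a minimal sketch, not part of the patch, of the scenario this cleanup covers; it assumes the standard MiniDFSCluster.Builder API, and the checkpoint path used is hypothetical.

    import java.io.File;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class CheckpointDirCleanupExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Point the SecondaryNameNode checkpoint directory at a known
        // location (hypothetical path for illustration).
        conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
            new File("/tmp/minidfs-example/namesecondary").toURI().toString());

        // With format=true, the cluster deletes existing name directories and,
        // after this patch, also any existing checkpoint directories before
        // formatting, so leftovers from an earlier run are cleared.
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(1)
            .format(true)
            .build();
        try {
          cluster.waitActive();
          // ... run the test; a SecondaryNameNode checkpoint would repopulate
          // the checkpoint directory during this run ...
        } finally {
          cluster.shutdown();
        }
      }
    }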