
HDFS-8243. Files written by TestHostsFiles and TestNameNodeMXBean are causing Release Audit Warnings. (Contributed by Ruth Wisniewski)

Arpit Agarwal 10 years ago
commit 54fa9b4217
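
This commit fixes Apache RAT (Release Audit Tool) warnings: TestHostsFiles and TestNameNodeMXBean wrote hosts/include files under build/test/data/temp without Apache license headers and left them behind, so release audits flagged the leftovers. The fix deletes each test's temporary directory in its finally block and guards cluster.shutdown() with a null check. Below is a minimal, self-contained sketch of the resulting cleanup pattern; the class name, directory path, and test body are placeholders, not code from the patch.

    import java.io.File;

    import org.apache.commons.io.FileUtils;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class TempDirCleanupSketch {
      public void run() throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = null;
        FileSystem localFileSys = null;
        Path dir = null;
        try {
          // The tests write include/exclude host files on the local file system.
          localFileSys = FileSystem.getLocal(conf);
          dir = new Path(localFileSys.getWorkingDirectory(),
              "build/test/data/temp/TempDirCleanupSketch");
          localFileSys.mkdirs(dir);
          cluster = new MiniDFSCluster.Builder(conf).build();
          // ... exercise the cluster here ...
        } finally {
          // Only shut down a cluster that actually started.
          if (cluster != null) {
            cluster.shutdown();
          }
          // Remove the scratch directory so RAT never sees the files;
          // deleteQuietly swallows I/O errors and never throws.
          if (localFileSys != null && dir != null && localFileSys.exists(dir)) {
            FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
          }
        }
      }
    }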

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -752,6 +752,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8380. Always call addStoredBlock on blocks which have been shifted
     from one storage to another (cmccabe)
 
+    HDFS-8243. Files written by TestHostsFiles and TestNameNodeMXBean are
+    causing Release Audit Warnings. (Ruth Wisniewski via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 11 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java

@@ -20,9 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertTrue;
 
 import java.lang.management.ManagementFactory;
+import java.io.File;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
@@ -126,7 +128,12 @@ public class TestHostsFiles {
       assertTrue("Live nodes should contain the decommissioned node",
               nodes.contains("Decommissioned"));
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      if (localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
     }
   }
 
@@ -167,6 +174,9 @@ public class TestHostsFiles {
       if (cluster != null) {
         cluster.shutdown();
       }
+      if (localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
     }
   }
 }
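
A small detail in the hunks above: FileUtils.deleteQuietly takes a java.io.File, while the test holds a Hadoop Path, hence the dir.toUri().getPath() conversion. A quick illustration of that conversion (the path literal is made up for the example):

    import java.io.File;

    import org.apache.hadoop.fs.Path;

    public class PathToFileSketch {
      public static void main(String[] args) {
        Path dir = new Path("file:///tmp/build/test/data/temp/example");
        // toUri().getPath() strips the scheme, leaving a plain local path.
        File asFile = new File(dir.toUri().getPath());
        System.out.println(asFile); // prints /tmp/build/test/data/temp/example
      }
    }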

+ 8 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.commons.io.FileUtils;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -214,6 +215,8 @@ public class TestNameNodeMXBean {
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
     MiniDFSCluster cluster = null;
+    FileSystem localFileSys = null;
+    Path dir = null;
 
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@@ -226,10 +229,9 @@ public class TestNameNodeMXBean {
         "Hadoop:service=NameNode,name=NameNodeInfo");
 
       // Define include file to generate deadNodes metrics
-      FileSystem localFileSys = FileSystem.getLocal(conf);
+      localFileSys = FileSystem.getLocal(conf);
       Path workingDir = localFileSys.getWorkingDirectory();
-      Path dir = new Path(workingDir,
-        "build/test/data/temp/TestNameNodeMXBean");
+      dir = new Path(workingDir,"build/test/data/temp/TestNameNodeMXBean");
       Path includeFile = new Path(dir, "include");
       assertTrue(localFileSys.mkdirs(dir));
       StringBuilder includeHosts = new StringBuilder();
@@ -258,8 +260,10 @@ public class TestNameNodeMXBean {
         assertTrue(deadNode.containsKey("decommissioned"));
         assertTrue(deadNode.containsKey("xferaddr"));
       }
-
     } finally {
+      if ((localFileSys != null) && localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
       if (cluster != null) {
         cluster.shutdown();
       }
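
The TestNameNodeMXBean hunks follow the same recipe: localFileSys and dir are hoisted out of the try block so the finally block can reach them, and since dir is assigned immediately after localFileSys, checking localFileSys for null is enough to make the exists() call safe. Because deleteQuietly never throws, an I/O error during cleanup cannot mask the original test failure.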