
HDFS-4610. Reverting the patch since the Jenkins build was not run.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1477396 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 12 years ago
commit e2091275dc
13 changed files with 43 additions and 53 deletions
  1. + 0 - 3
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  2. + 1 - 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
  3. + 1 - 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java
  4. + 4 - 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
  5. + 1 - 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
  6. + 3 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  7. + 2 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
  8. + 12 - 13
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java
  9. + 4 - 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java
  10. + 2 - 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
  11. + 8 - 8
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
  12. + 2 - 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
  13. + 3 - 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java
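
Context for the diffs that follow: the reverted change (HDFS-4610) had replaced direct java.io.File permission calls with the org.apache.hadoop.fs.FileUtil wrappers, and this commit puts the plain File calls back. Below is a minimal sketch of the two call shapes, for illustration only; it is not part of the commit and assumes hadoop-common is on the classpath for the FileUtil form.

    import java.io.File;
    import org.apache.hadoop.fs.FileUtil;

    public class PermissionCallShapes {
      public static void main(String[] args) {
        File dir = new File(args.length > 0 ? args[0] : ".");

        // Shape removed by this revert: Hadoop's static FileUtil wrappers.
        boolean writableViaUtil = FileUtil.canWrite(dir);
        boolean chmodViaUtil = FileUtil.setExecutable(dir, true);

        // Shape restored by this revert: the java.io.File instance methods.
        boolean writableViaFile = dir.canWrite();
        boolean chmodViaFile = dir.setExecutable(true);

        System.out.println(writableViaUtil + " " + chmodViaUtil + " "
            + writableViaFile + " " + chmodViaFile);
      }
    }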

+ 0 - 3
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -332,9 +332,6 @@ Trunk (Unreleased)
     HDFS-4734. HDFS Tests that use ShellCommandFencer are broken on Windows.
     (Arpit Agarwal via suresh)
 
-    HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable &
-    FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
-
   BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
 
     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -448,7 +448,7 @@ public abstract class Storage extends StorageInfo {
           LOG.warn(rootPath + "is not a directory");
           return StorageState.NON_EXISTENT;
         }
-        if (!FileUtil.canWrite(root)) {
+        if (!root.canWrite()) {
           LOG.warn("Cannot access storage directory " + rootPath);
           return StorageState.NON_EXISTENT;
         }

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImagePreTransactionalStorageInspector.java

@@ -33,7 +33,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
@@ -129,7 +128,7 @@ class FSImagePreTransactionalStorageInspector extends FSImageStorageInspector {
   static long readCheckpointTime(StorageDirectory sd) throws IOException {
     File timeFile = NNStorage.getStorageFile(sd, NameNodeFile.TIME);
     long timeStamp = 0L;
-    if (timeFile.exists() && FileUtil.canRead(timeFile)) {
+    if (timeFile.exists() && timeFile.canRead()) {
       DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
       try {
         timeStamp = in.readLong();

+ 4 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java

@@ -34,7 +34,6 @@ import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
@@ -231,8 +230,8 @@ public class NNStorage extends Storage implements Closeable,
         File root = sd.getRoot();
         LOG.info("currently disabled dir " + root.getAbsolutePath() +
                  "; type="+sd.getStorageDirType() 
-                 + ";canwrite="+FileUtil.canWrite(root));
-        if(root.exists() && FileUtil.canWrite(root)) {
+                 + ";canwrite="+root.canWrite());
+        if(root.exists() && root.canWrite()) {
           LOG.info("restoring dir " + sd.getRoot().getAbsolutePath());
           this.addStorageDir(sd); // restore
           this.removedStorageDirs.remove(sd);
@@ -506,7 +505,7 @@ public class NNStorage extends Storage implements Closeable,
       dirIterator(NameNodeDirType.IMAGE); it.hasNext();) {
       sd = it.next();
       File fsImage = getStorageFile(sd, NameNodeFile.IMAGE, txid);
-      if(FileUtil.canRead(sd.getRoot()) && fsImage.exists())
+      if(sd.getRoot().canRead() && fsImage.exists())
         return fsImage;
     }
     return null;
@@ -723,7 +722,7 @@ public class NNStorage extends Storage implements Closeable,
   private File findFile(NameNodeDirType dirType, String name) {
     for (StorageDirectory sd : dirIterable(dirType)) {
       File candidate = new File(sd.getCurrentDir(), name);
-      if (FileUtil.canRead(sd.getCurrentDir()) &&
+      if (sd.getCurrentDir().canRead() &&
           candidate.exists()) {
         return candidate;
       }

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java

@@ -33,7 +33,6 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.Time;
@@ -103,7 +102,7 @@ public class TransferFsImage {
     assert !dstFiles.isEmpty() : "No checkpoint targets.";
     
     for (File f : dstFiles) {
-      if (f.exists() && FileUtil.canRead(f)) {
+      if (f.exists() && f.canRead()) {
         LOG.info("Skipping download of remote edit log " +
             log + " since it already is stored locally at " + f);
         return;

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -681,9 +681,9 @@ public class MiniDFSCluster {
       sb.append("\tabsolute:").append(path.getAbsolutePath()).append("\n");
       sb.append("\tpermissions: ");
       sb.append(path.isDirectory() ? "d": "-");
-      sb.append(FileUtil.canRead(path) ? "r" : "-");
-      sb.append(FileUtil.canWrite(path) ? "w" : "-");
-      sb.append(FileUtil.canExecute(path) ? "x" : "-");
+      sb.append(path.canRead() ? "r" : "-");
+      sb.append(path.canWrite() ? "w" : "-");
+      sb.append(path.canExecute() ? "x" : "-");
       sb.append("\n");
       path = path.getParentFile();
     }

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

@@ -31,7 +31,6 @@ import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -92,10 +91,10 @@ public class TestDataNodeVolumeFailure {
   @After
   public void tearDown() throws Exception {
     if(data_fail != null) {
-      FileUtil.setWritable(data_fail, true);
+      data_fail.setWritable(true);
     }
     if(failedDir != null) {
-      FileUtil.setWritable(failedDir, true);
+      failedDir.setWritable(true);
     }
     if(cluster != null) {
       cluster.shutdown();

+ 12 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureReporting.java

@@ -31,7 +31,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -89,8 +88,8 @@ public class TestDataNodeVolumeFailureReporting {
   @After
   public void tearDown() throws Exception {
     for (int i = 0; i < 3; i++) {
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
+      new File(dataDir, "data"+(2*i+1)).setExecutable(true);
+      new File(dataDir, "data"+(2*i+2)).setExecutable(true);
     }
     cluster.shutdown();
   }
@@ -132,8 +131,8 @@ public class TestDataNodeVolumeFailureReporting {
      * fail. The client does not retry failed nodes even though
      * perhaps they could succeed because just a single volume failed.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
 
     /*
      * Create file1 and wait for 3 replicas (ie all DNs can still
@@ -169,7 +168,7 @@ public class TestDataNodeVolumeFailureReporting {
      * Now fail a volume on the third datanode. We should be able to get
      * three replicas since we've already identified the other failures.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(false));
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)3);
@@ -201,7 +200,7 @@ public class TestDataNodeVolumeFailureReporting {
      * and that it's no longer up. Only wait for two replicas since
      * we'll never get a third.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, false));
+    assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(false));
     Path file3 = new Path("/test3");
     DFSTestUtil.createFile(fs, file3, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file3, (short)2);
@@ -223,10 +222,10 @@ public class TestDataNodeVolumeFailureReporting {
      * restart, so file creation should be able to succeed after
      * restoring the data directories and restarting the datanodes.
      */
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol1, true));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn3Vol2, true));
+    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", dn3Vol1.setExecutable(true));
+    assertTrue("Couldn't chmod local vol", dn3Vol2.setExecutable(true));
     cluster.restartDataNodes();
     cluster.waitActive();
     Path file4 = new Path("/test4");
@@ -262,8 +261,8 @@ public class TestDataNodeVolumeFailureReporting {
     // third healthy so one node in the pipeline will not fail). 
     File dn1Vol1 = new File(dataDir, "data"+(2*0+1));
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn1Vol1, false));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn1Vol1.setExecutable(false));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
 
     Path file1 = new Path("/test1");
     DFSTestUtil.createFile(fs, file1, 1024, (short)2, 1L);

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailureToleration.java

@@ -77,8 +77,8 @@ public class TestDataNodeVolumeFailureToleration {
   @After
   public void tearDown() throws Exception {
     for (int i = 0; i < 3; i++) {
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+1)), true);
-      FileUtil.setExecutable(new File(dataDir, "data"+(2*i+2)), true);
+      new File(dataDir, "data"+(2*i+1)).setExecutable(true);
+      new File(dataDir, "data"+(2*i+2)).setExecutable(true);
     }
     cluster.shutdown();
   }
@@ -152,7 +152,7 @@ public class TestDataNodeVolumeFailureToleration {
 
     // Fail a volume on the 2nd DN
     File dn2Vol1 = new File(dataDir, "data"+(2*1+1));
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, false));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(false));
 
     // Should only get two replicas (the first DN and the 3rd)
     Path file1 = new Path("/test1");
@@ -165,7 +165,7 @@ public class TestDataNodeVolumeFailureToleration {
 
     // If we restore the volume we should still only be able to get
     // two replicas since the DN is still considered dead.
-    assertTrue("Couldn't chmod local vol", FileUtil.setExecutable(dn2Vol1, true));
+    assertTrue("Couldn't chmod local vol", dn2Vol1.setExecutable(true));
     Path file2 = new Path("/test2");
     DFSTestUtil.createFile(fs, file2, 1024, (short)3, 1L);
     DFSTestUtil.waitReplication(fs, file2, (short)2);

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java

@@ -27,7 +27,6 @@ import java.net.Socket;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -107,8 +106,8 @@ public class TestDiskError {
       }
     } finally {
       // restore its old permission
-      FileUtil.setWritable(dir1, true);
-      FileUtil.setWritable(dir2, true);
+      dir1.setWritable(true);
+      dir2.setWritable(true);
     }
   }
 

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java

@@ -157,7 +157,7 @@ public class TestCheckpoint {
       
       try {
         // Simulate the mount going read-only
-        FileUtil.setWritable(dir, false);
+        dir.setWritable(false);
         cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
             .format(false).build();
         fail("NN should have failed to start with " + dir + " set unreadable");
@@ -167,7 +167,7 @@ public class TestCheckpoint {
       } finally {
         cleanup(cluster);
         cluster = null;
-        FileUtil.setWritable(dir, true);
+        dir.setWritable(true);
       }
     }
   }
@@ -1825,7 +1825,7 @@ public class TestCheckpoint {
       StorageDirectory sd1 = storage.getStorageDir(1);
       
       currentDir = sd0.getCurrentDir();
-      FileUtil.setExecutable(currentDir, false);
+      currentDir.setExecutable(false);
 
       // Upload checkpoint when NN has a bad storage dir. This should
       // succeed and create the checkpoint in the good dir.
@@ -1835,7 +1835,7 @@ public class TestCheckpoint {
           new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
       
       // Restore the good dir
-      FileUtil.setExecutable(currentDir, true);
+      currentDir.setExecutable(true);
       nn.restoreFailedStorage("true");
       nn.rollEditLog();
 
@@ -1846,7 +1846,7 @@ public class TestCheckpoint {
       assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
     } finally {
       if (currentDir != null) {
-        FileUtil.setExecutable(currentDir, true);
+        currentDir.setExecutable(true);
       }
       cleanup(secondary);
       secondary = null;
@@ -1896,7 +1896,7 @@ public class TestCheckpoint {
       StorageDirectory sd0 = storage.getStorageDir(0);
       assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
       currentDir = sd0.getCurrentDir();
-      FileUtil.setExecutable(currentDir, false);
+      currentDir.setExecutable(false);
 
       // Try to upload checkpoint -- this should fail since there are no
       // valid storage dirs
@@ -1909,7 +1909,7 @@ public class TestCheckpoint {
       }
       
       // Restore the good dir
-      FileUtil.setExecutable(currentDir, true);
+      currentDir.setExecutable(true);
       nn.restoreFailedStorage("true");
       nn.rollEditLog();
 
@@ -1920,7 +1920,7 @@ public class TestCheckpoint {
       assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
     } finally {
       if (currentDir != null) {
-        FileUtil.setExecutable(currentDir, true);
+        currentDir.setExecutable(true);
       }
       cleanup(secondary);
       secondary = null;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java

@@ -881,14 +881,14 @@ public class TestEditLog {
     logDir.mkdirs();
     FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
     try {
-      FileUtil.setWritable(logDir, false);
+      logDir.setWritable(false);
       log.openForWrite();
       fail("Did no throw exception on only having a bad dir");
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains(
           "too few journals successfully started", ioe);
     } finally {
-      FileUtil.setWritable(logDir, true);
+      logDir.setWritable(true);
       log.close();
     }
   }

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java

@@ -28,7 +28,6 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -107,10 +106,10 @@ public class TestNNStorageRetentionFunctional {
           getInProgressEditsFileName(5));
       
       LOG.info("Failing first storage dir by chmodding it");
-      FileUtil.setExecutable(sd0, false);
+      sd0.setExecutable(false);
       doSaveNamespace(nn);      
       LOG.info("Restoring accessibility of first storage dir");      
-      FileUtil.setExecutable(sd0, true);
+      sd0.setExecutable(true);
 
       LOG.info("nothing should have been purged in first storage dir");
       assertGlobEquals(cd0, "fsimage_\\d*",
@@ -139,7 +138,7 @@ public class TestNNStorageRetentionFunctional {
       assertGlobEquals(cd0, "edits_.*",
           getInProgressEditsFileName(9));
     } finally {
-      FileUtil.setExecutable(sd0, true);
+      sd0.setExecutable(true);
 
       LOG.info("Shutting down...");
       if (cluster != null) {