
HDFS-4207. All hadoop fs operations fail if the default fs is down even if a different fs is specified in the command. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1@1412229 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas · commit e54df1bb29
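
The root cause: FsShell (and DFSAdmin, which extends it) eagerly called FileSystem.get(conf) during init(), so an unreachable default filesystem aborted every command, even one that names a different filesystem explicitly. The fix defers that call and, for path-taking commands, resolves the filesystem from the path's own scheme. A minimal sketch of the idea, not code from the patch (the helper name resolvePathFs is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    class PathFsExample {
      // Resolve the FileSystem from the argument's scheme/authority instead of
      // the default FS; FileSystem.get(conf) is only needed when no path is given.
      static FileSystem resolvePathFs(String arg, Configuration conf) throws IOException {
        Path p = new Path(arg);          // e.g. "file:///" or "hdfs://nn:8020/dir"
        return p.getFileSystem(conf);    // never touches an unrelated default FS
      }
    }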

+ 3 - 0
CHANGES.txt

@@ -306,6 +306,9 @@ Release 1.2.0 - unreleased
     HADOOP-9036. Fix racy test case TestSinkQueue (Backport HADOOP-7292).
     (Luke Lu backport by suresh)
 
+    HDFS-4207. All hadoop fs operations fail if the default fs is down even if 
+    a different fs is specified in the command. (Jing Zhao via suresh)
+
 Release 1.1.1 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 33 - 20
src/core/org/apache/hadoop/fs/FsShell.java

@@ -22,10 +22,11 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
-import java.text.DecimalFormat;
-import java.text.NumberFormat;
 import java.text.SimpleDateFormat;
-import java.util.*;
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.TimeZone;
 import java.util.zip.GZIPInputStream;
 
 import org.apache.hadoop.conf.Configuration;
@@ -41,14 +42,14 @@ import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.util.StringUtils;
 
 /** Provide command line access to a FileSystem. */
 public class FsShell extends Configured implements Tool {
 
-  protected FileSystem fs;
+  private FileSystem fs;
   private Trash trash;
   public static final SimpleDateFormat dateForm = 
     new SimpleDateFormat("yyyy-MM-dd HH:mm");
@@ -78,15 +79,22 @@ public class FsShell extends Configured implements Tool {
   
   protected void init() throws IOException {
     getConf().setQuietMode(true);
-    if (this.fs == null) {
-     this.fs = FileSystem.get(getConf());
+  }
+  
+  protected FileSystem getFS() throws IOException {
+    if (fs == null) {
+      fs = FileSystem.get(getConf());
     }
-    if (this.trash == null) {
-      this.trash = new Trash(getConf());
+    return fs;
+  }
+  
+  protected Trash getTrash() throws IOException {
+    if (trash == null) {
+      trash = new Trash(getConf());
     }
+    return trash;
   }
 
-  
   /**
    * Copies from stdin to the indicated file.
    */
@@ -360,7 +368,9 @@ public class FsShell extends Configured implements Tool {
     DataOutputBuffer outbuf;
 
     public TextRecordInputStream(FileStatus f) throws IOException {
-      r = new SequenceFile.Reader(fs, f.getPath(), getConf());
+      FileSystem pFS = f == null ? getFS() : f.getPath().getFileSystem(
+          getConf());
+      r = new SequenceFile.Reader(pFS, f.getPath(), getConf());
       key = ReflectionUtils.newInstance(r.getKeyClass().asSubclass(WritableComparable.class),
                                         getConf());
       val = ReflectionUtils.newInstance(r.getValueClass().asSubclass(Writable.class),
@@ -468,11 +478,12 @@ public class FsShell extends Configured implements Tool {
       System.out.flush();
 
       boolean printWarning = false;
-      FileStatus status = fs.getFileStatus(f);
+      FileSystem pFS = f.getFileSystem(getConf());
+      FileStatus status = pFS.getFileStatus(f);
       long len = status.getLen();
 
       for(boolean done = false; !done; ) {
-        BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
+        BlockLocation[] locations = pFS.getFileBlockLocations(status, 0, len);
         int i = 0;
         for(; i < locations.length && 
           locations[i].getHosts().length == rep; i++)
@@ -973,9 +984,10 @@ public class FsShell extends Configured implements Tool {
     //
     if (argv.length > 3) {
       Path dst = new Path(dest);
-      if (!fs.isDirectory(dst)) {
-        throw new IOException("When copying multiple files, " 
-                              + "destination " + dest + " should be a directory.");
+      FileSystem pFS = dst.getFileSystem(conf);
+      if (!pFS.isDirectory(dst)) {
+        throw new IOException("When copying multiple files, " + "destination "
+            + dest + " should be a directory.");
       }
     }
     //
@@ -1081,15 +1093,15 @@ public class FsShell extends Configured implements Tool {
     }
   }
   private void expunge() throws IOException {
-    trash.expunge();
-    trash.checkpoint();
+    getTrash().expunge();
+    getTrash().checkpoint();
   }
 
   /**
    * Returns the Trash object associated with this shell.
    */
-  public Path getCurrentTrashDir() {
-    return trash.getCurrentTrashDir();
+  public Path getCurrentTrashDir() throws IOException {
+    return getTrash().getCurrentTrashDir();
   }
 
   /**
@@ -1783,6 +1795,7 @@ public class FsShell extends Configured implements Tool {
       } else if ("-chmod".equals(cmd) || 
                  "-chown".equals(cmd) ||
                  "-chgrp".equals(cmd)) {
+        // Here fs is not used
         exitCode = FsShellPermissions.changePermissions(fs, cmd, argv, i, this);
       } else if ("-ls".equals(cmd)) {
         if (i < argv.length) {
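
Condensed, the FsShell change is two patterns: cache the default filesystem behind a lazy accessor that is invoked only when a command actually needs it, and have per-path operations ask the Path for its own filesystem. A simplified sketch, assuming the surrounding FsShell class from the diff above (the printFileLen helper is illustrative, not part of the patch):

      // Lazy accessor: the default FS is contacted only on demand.
      protected FileSystem getFS() throws IOException {
        if (fs == null) {
          fs = FileSystem.get(getConf());
        }
        return fs;
      }

      // Per-path operation: resolve the FS from the path's scheme, not the default.
      void printFileLen(Path f) throws IOException {
        FileSystem pFS = f.getFileSystem(getConf());
        System.out.println(f + ": " + pFS.getFileStatus(f).getLen() + " bytes");
      }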

+ 77 - 104
src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -30,7 +30,6 @@ import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
@@ -255,73 +254,78 @@ public class DFSAdmin extends FsShell {
     super(conf);
   }
   
+  protected DistributedFileSystem getDFS() throws IOException {
+    FileSystem fs = getFS();
+    if (!(fs instanceof DistributedFileSystem)) {
+      throw new IllegalArgumentException("FileSystem " + fs.getUri()
+          + " is not a distributed file system");
+    }
+    return (DistributedFileSystem) fs;
+  }
+  
   /**
    * Gives a report on how the FileSystem is doing.
    * @exception IOException if the filesystem does not exist.
    */
   public void report() throws IOException {
-    if (fs instanceof DistributedFileSystem) {
-      DistributedFileSystem dfs = (DistributedFileSystem) fs;
-      DiskStatus ds = dfs.getDiskStatus();
-      long capacity = ds.getCapacity();
-      long used = ds.getDfsUsed();
-      long remaining = ds.getRemaining();
-      long presentCapacity = used + remaining;
-      boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
-      UpgradeStatusReport status = 
-                      dfs.distributedUpgradeProgress(UpgradeAction.GET_STATUS);
-
-      if (mode) {
-        System.out.println("Safe mode is ON");
-      }
-      if (status != null) {
-        System.out.println(status.getStatusText(false));
-      }
-      System.out.println("Configured Capacity: " + capacity
-                         + " (" + StringUtils.byteDesc(capacity) + ")");
-      System.out.println("Present Capacity: " + presentCapacity
-          + " (" + StringUtils.byteDesc(presentCapacity) + ")");
-      System.out.println("DFS Remaining: " + remaining
-          + " (" + StringUtils.byteDesc(remaining) + ")");
-      System.out.println("DFS Used: " + used
-                         + " (" + StringUtils.byteDesc(used) + ")");
-      System.out.println("DFS Used%: "
-                         + StringUtils.limitDecimalTo2(((1.0 * used) / presentCapacity) * 100)
-                         + "%");
-      
-      /* These counts are not always upto date. They are updated after  
-       * iteration of an internal list. Should be updated in a few seconds to 
-       * minutes. Use "-metaSave" to list of all such blocks and accurate 
-       * counts.
-       */
-      System.out.println("Under replicated blocks: " + 
-                         dfs.getUnderReplicatedBlocksCount());
-      System.out.println("Blocks with corrupt replicas: " + 
-                         dfs.getCorruptBlocksCount());
-      System.out.println("Missing blocks: " + 
-                         dfs.getMissingBlocksCount());
-                           
-      System.out.println();
+    DistributedFileSystem dfs = getDFS();
+    DiskStatus ds = dfs.getDiskStatus();
+    long capacity = ds.getCapacity();
+    long used = ds.getDfsUsed();
+    long remaining = ds.getRemaining();
+    long presentCapacity = used + remaining;
+    boolean mode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
+    UpgradeStatusReport status = dfs
+        .distributedUpgradeProgress(UpgradeAction.GET_STATUS);
+
+    if (mode) {
+      System.out.println("Safe mode is ON");
+    }
+    if (status != null) {
+      System.out.println(status.getStatusText(false));
+    }
+    System.out.println("Configured Capacity: " + capacity + " ("
+        + StringUtils.byteDesc(capacity) + ")");
+    System.out.println("Present Capacity: " + presentCapacity + " ("
+        + StringUtils.byteDesc(presentCapacity) + ")");
+    System.out.println("DFS Remaining: " + remaining + " ("
+        + StringUtils.byteDesc(remaining) + ")");
+    System.out.println("DFS Used: " + used + " (" + StringUtils.byteDesc(used)
+        + ")");
+    System.out.println("DFS Used%: "
+        + StringUtils.limitDecimalTo2(((1.0 * used) / presentCapacity) * 100)
+        + "%");
+
+    /*
+     * These counts are not always upto date. They are updated after iteration
+     * of an internal list. Should be updated in a few seconds to minutes. Use
+     * "-metaSave" to list of all such blocks and accurate counts.
+     */
+    System.out.println("Under replicated blocks: "
+        + dfs.getUnderReplicatedBlocksCount());
+    System.out.println("Blocks with corrupt replicas: "
+        + dfs.getCorruptBlocksCount());
+    System.out.println("Missing blocks: " + dfs.getMissingBlocksCount());
 
-      System.out.println("-------------------------------------------------");
-      
-      DatanodeInfo[] live = dfs.getClient().datanodeReport(
-                                                   DatanodeReportType.LIVE);
-      DatanodeInfo[] dead = dfs.getClient().datanodeReport(
-                                                   DatanodeReportType.DEAD);
-      System.out.println("Datanodes available: " + live.length +
-                         " (" + (live.length + dead.length) + " total, " + 
-                         dead.length + " dead)\n");
-      
-      for (DatanodeInfo dn : live) {
-        System.out.println(dn.getDatanodeReport());
-        System.out.println();
-      }
-      for (DatanodeInfo dn : dead) {
-        System.out.println(dn.getDatanodeReport());
-        System.out.println();
-      }      
+    System.out.println();
+
+    System.out.println("-------------------------------------------------");
+
+    DatanodeInfo[] live = dfs.getClient().datanodeReport(
+        DatanodeReportType.LIVE);
+    DatanodeInfo[] dead = dfs.getClient().datanodeReport(
+        DatanodeReportType.DEAD);
+    System.out.println("Datanodes available: " + live.length + " ("
+        + (live.length + dead.length) + " total, " + dead.length + " dead)\n");
+
+    for (DatanodeInfo dn : live) {
+      System.out.println(dn.getDatanodeReport());
+      System.out.println();
     }
+    for (DatanodeInfo dn : dead) {
+      System.out.println(dn.getDatanodeReport());
+      System.out.println();
+    }     
   }
 
   /**
@@ -332,10 +336,6 @@ public class DFSAdmin extends FsShell {
    * @exception IOException if the filesystem does not exist.
    */
   public void setSafeMode(String[] argv, int idx) throws IOException {
-    if (!(fs instanceof DistributedFileSystem)) {
-      System.err.println("FileSystem is " + fs.getUri());
-      return;
-    }
     if (idx != argv.length - 1) {
       printUsage("-safemode");
       return;
@@ -356,7 +356,7 @@ public class DFSAdmin extends FsShell {
       printUsage("-safemode");
       return;
     }
-    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    DistributedFileSystem dfs = getDFS();
     boolean inSafeMode = dfs.setSafeMode(action);
 
     //
@@ -386,12 +386,7 @@ public class DFSAdmin extends FsShell {
   public int saveNamespace() throws IOException {
     int exitCode = -1;
 
-    if (!(fs instanceof DistributedFileSystem)) {
-      System.err.println("FileSystem is " + fs.getUri());
-      return exitCode;
-    }
-
-    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    DistributedFileSystem dfs = getDFS();
     dfs.saveNamespace();
     exitCode = 0;
    
@@ -407,12 +402,7 @@ public class DFSAdmin extends FsShell {
   public int refreshNodes() throws IOException {
     int exitCode = -1;
 
-    if (!(fs instanceof DistributedFileSystem)) {
-      System.err.println("FileSystem is " + fs.getUri());
-      return exitCode;
-    }
-
-    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    DistributedFileSystem dfs = getDFS();
     dfs.refreshNodes();
     exitCode = 0;
    
@@ -440,12 +430,7 @@ public class DFSAdmin extends FsShell {
       return exitCode;
     }
 
-    if (!(fs instanceof DistributedFileSystem)) {
-      System.err.println("FileSystem is " + fs.getUri());
-      return exitCode;
-    }
-
-    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    DistributedFileSystem dfs = getDFS();
     dfs.setBalancerBandwidth(bandwidth);
     exitCode = 0;
    
@@ -595,18 +580,10 @@ public class DFSAdmin extends FsShell {
    * @exception IOException 
    */
   public int finalizeUpgrade() throws IOException {
-    int exitCode = -1;
-
-    if (!(fs instanceof DistributedFileSystem)) {
-      System.out.println("FileSystem is " + fs.getUri());
-      return exitCode;
-    }
-
-    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    DistributedFileSystem dfs = getDFS();
     dfs.finalizeUpgrade();
-    exitCode = 0;
    
-    return exitCode;
+    return 0;
   }
 
   /**
@@ -617,10 +594,6 @@ public class DFSAdmin extends FsShell {
    * @exception IOException 
    */
   public int upgradeProgress(String[] argv, int idx) throws IOException {
-    if (!(fs instanceof DistributedFileSystem)) {
-      System.out.println("FileSystem is " + fs.getUri());
-      return -1;
-    }
     if (idx != argv.length - 1) {
       printUsage("-upgradeProgress");
       return -1;
@@ -638,7 +611,7 @@ public class DFSAdmin extends FsShell {
       return -1;
     }
 
-    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    DistributedFileSystem dfs = getDFS();
     UpgradeStatusReport status = dfs.distributedUpgradeProgress(action);
     String statusText = (status == null ? 
         "There are no upgrades in progress." :
@@ -657,7 +630,7 @@ public class DFSAdmin extends FsShell {
    */
   public int metaSave(String[] argv, int idx) throws IOException {
     String pathname = argv[idx];
-    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    DistributedFileSystem dfs = getDFS();
     dfs.metaSave(pathname);
     System.out.println("Created file " + pathname + " on server " +
                        dfs.getUri());
@@ -936,13 +909,13 @@ public class DFSAdmin extends FsShell {
       } else if ("-metasave".equals(cmd)) {
         exitCode = metaSave(argv, i);
       } else if (ClearQuotaCommand.matches(cmd)) {
-        exitCode = new ClearQuotaCommand(argv, i, fs).runAll();
+        exitCode = new ClearQuotaCommand(argv, i, getDFS()).runAll();
       } else if (SetQuotaCommand.matches(cmd)) {
-        exitCode = new SetQuotaCommand(argv, i, fs).runAll();
+        exitCode = new SetQuotaCommand(argv, i, getDFS()).runAll();
       } else if (ClearSpaceQuotaCommand.matches(cmd)) {
-        exitCode = new ClearSpaceQuotaCommand(argv, i, fs).runAll();
+        exitCode = new ClearSpaceQuotaCommand(argv, i, getDFS()).runAll();
       } else if (SetSpaceQuotaCommand.matches(cmd)) {
-        exitCode = new SetSpaceQuotaCommand(argv, i, fs).runAll();
+        exitCode = new SetSpaceQuotaCommand(argv, i, getDFS()).runAll();
       } else if ("-refreshServiceAcl".equals(cmd)) {
         exitCode = refreshServiceAcl();
       } else if ("-refreshUserToGroupsMappings".equals(cmd)) {
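
DFSAdmin used to repeat an instanceof DistributedFileSystem check in each admin command; the patch centralizes it in getDFS(), which throws when the resolved filesystem is not HDFS. Roughly how a command reads after the change (the exception-to-exit-code handling in run() is paraphrased, not quoted from the diff):

      protected DistributedFileSystem getDFS() throws IOException {
        FileSystem fs = getFS();                      // still lazy; may be the default FS
        if (!(fs instanceof DistributedFileSystem)) {
          throw new IllegalArgumentException("FileSystem " + fs.getUri()
              + " is not a distributed file system");
        }
        return (DistributedFileSystem) fs;
      }

      public int refreshNodes() throws IOException {
        getDFS().refreshNodes();   // throws if the default FS is, e.g., file:///
        return 0;
      }

The thrown IllegalArgumentException surfaces as a non-zero exit code from the command driver, which is what the new testInvalidShell case below expects.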

+ 22 - 6
src/test/org/apache/hadoop/fs/TestFsShellReturnCode.java

@@ -18,15 +18,13 @@
 
 package org.apache.hadoop.fs;
 
+import static org.junit.Assert.assertTrue;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-
-import org.apache.hadoop.fs.FileSystem;
-
-import org.junit.Test;
 import org.junit.Assert;
-import static org.junit.Assert.assertTrue;
+import org.junit.Test;
 
 /**
  * This test validates that chmod, chown, chgrp returning correct exit codes
@@ -235,6 +233,24 @@ public class TestFsShellReturnCode {
     // Test 4: exit code for chgrp on existing path with globbed input is 0
     String argv4[] = { "-chgrp", "admin", f7 };
     verify(fs, "-chgrp", argv4, 1, fsShell, 0);
-
+  }
+  
+  @Test
+  public void testInvalidDefautlFS() throws Exception {
+    // if default fs doesn't exist or is invalid, but the path provided in
+    // arguments is valid - fsshell should work
+    FsShell shell = new FsShell();
+    Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY,
+        "hhhh://doesnotexist/");
+    shell.setConf(conf);
+    String[] args = new String[2];
+    args[0] = "-ls";
+    args[1] = "file:///"; // this is valid, so command should run
+    int res = shell.run(args);
+    System.out.println("res =" + res);
+    shell.setConf(conf);
+    int run = shell.run(args);
+    assertTrue("Return code should be 0", run == 0);
   }
 }
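
As a standalone usage sketch of what this test exercises: with the default filesystem pointed at an unreachable URI, a command that names its own filesystem should still succeed (the bogus hhhh:// URI is just a placeholder for a down default FS; run() is declared to throw Exception):

      Configuration conf = new Configuration();
      conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hhhh://doesnotexist/");
      FsShell shell = new FsShell();
      shell.setConf(conf);
      // "-ls file:///" resolves the local FS from the path, so it works
      // even though the default FS is unreachable; expected return code: 0.
      int rc = shell.run(new String[] { "-ls", "file:///" });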

+ 16 - 0
src/test/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -45,6 +45,7 @@ import org.apache.hadoop.fs.shell.Count;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
@@ -1393,4 +1394,19 @@ public class TestDFSShell extends TestCase {
     System.out.println("results:\n" + results);
     return results;
   }
+  
+    
+  /**
+   * default setting is file:// which is not a DFS so DFSAdmin should throw and
+   * catch InvalidArgumentException and return -1 exit code.
+   * 
+   * @throws Exception
+   */
+  public void testInvalidShell() throws Exception {
+    Configuration conf = new Configuration(); // default FS (non-DFS)
+    DFSAdmin admin = new DFSAdmin();
+    admin.setConf(conf);
+    int res = admin.run(new String[] { "-refreshNodes" });
+    assertEquals("expected to fail -1", res, -1);
+  }
 }