
Merge trunk into branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3077@1373573 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 12 years ago
Parent
Commit
d5abe22844
66 changed files with 1011 additions and 263 deletions
  1. 9 1
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 12 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
  3. 12 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java
  4. 40 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasFileDescriptor.java
  5. 7 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  6. 6 3
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
  7. 3 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
  8. 6 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java
  9. 10 1
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java
  10. 10 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
  11. 11 5
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java
  12. 11 3
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java
  13. 30 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  14. 108 0
      hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java
  15. 10 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
  16. 25 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  17. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  18. 11 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
  19. 7 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  20. 68 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  21. 24 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
  22. 15 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
  23. 16 10
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  24. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
  25. 3 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  26. 4 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java
  27. 33 22
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  28. 14 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  29. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
  30. 16 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
  31. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  32. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  33. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
  34. 90 74
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  35. 23 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  36. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java
  37. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java
  38. 32 25
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
  39. 9 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
  40. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java
  41. 7 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java
  42. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/test_fuse_dfs.c
  43. 17 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
  44. 47 12
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  45. 65 8
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  46. 0 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
  47. 24 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
  48. 0 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
  49. 23 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
  50. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java
  51. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
  52. 6 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
  53. 34 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
  54. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
  55. 6 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
  56. 6 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
  57. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
  58. 6 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
  59. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java
  60. 7 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java
  61. 7 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java
  62. 11 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java
  63. 3 0
      hadoop-mapreduce-project/CHANGES.txt
  64. 10 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
  65. 18 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
  66. 1 1
      hadoop-project/pom.xml

+ 9 - 1
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -213,7 +213,10 @@ Branch-2 ( Unreleased changes )
 
     HADOOP-8644. AuthenticatedURL should be able to use SSLFactory. (tucu)
 
-    HADOOP-8681. add support for HTTPS to the web UIs. (tucu)
+    HADOOP-8581. add support for HTTPS to the web UIs. (tucu)
+
+    HADOOP-7754. Expose file descriptors from Hadoop-wrapped local 
+    FileSystems (todd and ahmed via tucu)
 
   IMPROVEMENTS
 
@@ -284,6 +287,8 @@ Branch-2 ( Unreleased changes )
     HADOOP-8620. Add -Drequire.fuse and -Drequire.snappy. (Colin
     Patrick McCabe via eli)
 
+    HADOOP-8687. Upgrade log4j to 1.2.17. (eli)
+
   BUG FIXES
 
     HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
@@ -418,6 +423,9 @@ Branch-2 ( Unreleased changes )
 
     HADOOP-8660. TestPseudoAuthenticator failing with NPE. (tucu)
 
+    HADOOP-8699. some common testcases create core-site.xml in test-classes 
+    making other testcases to fail. (tucu)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES

+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java

@@ -18,6 +18,8 @@
 package org.apache.hadoop.fs;
 
 import java.io.BufferedInputStream;
+import java.io.FileDescriptor;
+import java.io.FileInputStream;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -31,7 +33,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
 public class BufferedFSInputStream extends BufferedInputStream
-implements Seekable, PositionedReadable {
+implements Seekable, PositionedReadable, HasFileDescriptor {
   /**
    * Creates a <code>BufferedFSInputStream</code>
    * with the specified buffer size,
@@ -97,4 +99,13 @@ implements Seekable, PositionedReadable {
   public void readFully(long position, byte[] buffer) throws IOException {
     ((FSInputStream)in).readFully(position, buffer);
   }
+
+  @Override
+  public FileDescriptor getFileDescriptor() throws IOException {
+    if (in instanceof HasFileDescriptor) {
+      return ((HasFileDescriptor) in).getFileDescriptor();
+    } else {
+      return null;
+    }
+  }
 }

+ 12 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataInputStream.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class FSDataInputStream extends DataInputStream
-    implements Seekable, PositionedReadable, Closeable, ByteBufferReadable {
+    implements Seekable, PositionedReadable, Closeable, ByteBufferReadable, HasFileDescriptor {
 
   public FSDataInputStream(InputStream in)
     throws IOException {
@@ -125,4 +125,15 @@ public class FSDataInputStream extends DataInputStream
 
     throw new UnsupportedOperationException("Byte-buffer read unsupported by input stream");
   }
+
+  @Override
+  public FileDescriptor getFileDescriptor() throws IOException {
+    if (in instanceof HasFileDescriptor) {
+      return ((HasFileDescriptor) in).getFileDescriptor();
+    } else if (in instanceof FileInputStream) {
+      return ((FileInputStream) in).getFD();
+    } else {
+      return null;
+    }
+  }
 }

+ 40 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HasFileDescriptor.java

@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs;
+
+import java.io.FileDescriptor;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * Having a FileDescriptor
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface HasFileDescriptor {
+
+  /**
+   * @return the FileDescriptor
+   * @throws IOException
+   */
+  public FileDescriptor getFileDescriptor() throws IOException;
+
+}
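Taken together, the hunks above (HADOOP-7754) let a caller ask a Hadoop-wrapped local stream for its underlying descriptor. A minimal sketch of the new call path, assuming a pre-existing local file; the class name and path are illustrative, and the null check matters because wrappers without a real file behind them return null:

import java.io.FileDescriptor;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class FdProbe {
  public static void main(String[] args) throws Exception {
    LocalFileSystem local = FileSystem.getLocal(new Configuration());
    // Use the raw (non-checksummed) filesystem so the stream chain is
    // FSDataInputStream -> BufferedFSInputStream -> LocalFSFileInputStream,
    // each link now implementing (or delegating through) HasFileDescriptor.
    FSDataInputStream in = local.getRawFileSystem().open(new Path("/tmp/example"));
    try {
      FileDescriptor fd = in.getFileDescriptor();
      System.out.println(fd != null ? "got a file descriptor" : "no descriptor");
    } finally {
      in.close();
    }
  }
}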

+ 7 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -26,6 +26,7 @@ import java.io.FileNotFoundException;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.io.FileDescriptor;
 import java.net.URI;
 import java.nio.ByteBuffer;
 import java.util.Arrays;
@@ -111,7 +112,7 @@ public class RawLocalFileSystem extends FileSystem {
   /*******************************************************
    * For open()'s FSInputStream.
    *******************************************************/
-  class LocalFSFileInputStream extends FSInputStream {
+  class LocalFSFileInputStream extends FSInputStream implements HasFileDescriptor {
     private FileInputStream fis;
     private long position;
 
@@ -181,6 +182,11 @@ public class RawLocalFileSystem extends FileSystem {
       }
       return value;
     }
+
+    @Override
+    public FileDescriptor getFileDescriptor() throws IOException {
+      return fis.getFD();
+    }
   }
   
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {

+ 6 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -88,7 +88,7 @@ public abstract class HAAdmin extends Configured implements Tool {
 
   /** Output stream for errors, for use in tests */
   protected PrintStream errOut = System.err;
-  PrintStream out = System.out;
+  protected PrintStream out = System.out;
   private RequestSource requestSource = RequestSource.REQUEST_BY_USER;
 
   protected abstract HAServiceTarget resolveTarget(String string);
@@ -439,7 +439,10 @@ public abstract class HAAdmin extends Configured implements Tool {
   }
   
   private int help(String[] argv) {
-    if (argv.length != 2) {
+    if (argv.length == 1) { // only -help
+      printUsage(out);
+      return 0;
+    } else if (argv.length != 2) {
       printUsage(errOut, "-help");
       return -1;
     }
@@ -454,7 +457,7 @@ public abstract class HAAdmin extends Configured implements Tool {
       return -1;
     }
     
-    errOut.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
+    out.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
     return 0;
   }
   

+ 3 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java

@@ -80,6 +80,8 @@ public abstract class ZKFailoverController {
     ZK_AUTH_KEY
   };
   
+  protected static final String USAGE = 
+      "Usage: java zkfc [ -formatZK [-force] [-nonInteractive] ]";
 
   /** Unable to format the parent znode in ZK */
   static final int ERR_CODE_FORMAT_DENIED = 2;
@@ -248,8 +250,7 @@ public abstract class ZKFailoverController {
   }
 
   private void printUsage() {
-    System.err.println("Usage: " + this.getClass().getSimpleName() +
-        " [-formatZK [-force | -nonInteractive]]");
+    System.err.println(USAGE + "\n");
   }
 
   private int formatZK(boolean force, boolean interactive)

+ 6 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpConfig.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.http;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -37,6 +38,11 @@ public class HttpConfig {
         CommonConfigurationKeysPublic.HADOOP_SSL_ENABLED_DEFAULT);
   }
 
+  @VisibleForTesting
+  static void setSecure(boolean secure) {
+    sslEnabled = secure;
+  }
+
   public static boolean isSecure() {
     return sslEnabled;
   }

+ 10 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ExitUtil.java

@@ -73,6 +73,15 @@ public final class ExitUtil {
     firstExitException = null;
   }
 
+  /**
+   * Reset the tracking of process termination. This is for use
+   * in unit tests where one test in the suite expects an exit
+   * but others do not.
+   */
+  public static void resetFirstExitException() {
+    firstExitException = null;
+  }
+
   /**
    * Terminate the current process. Note that terminate is the *only* method
    * that should be used to terminate the daemon processes.
@@ -112,4 +121,4 @@ public final class ExitUtil {
   public static void terminate(int status) throws ExitException {
     terminate(status, "ExitException");
   }
-}
+}
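resetFirstExitException() complements ExitUtil's existing exit tracking. A hedged sketch of the intended test pattern, assuming the pre-existing disableSystemExit() and terminateCalled() helpers on the same class; the test class itself is hypothetical:

import static org.junit.Assert.*;

import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.junit.Before;
import org.junit.Test;

public class TestExitHandling {
  @Before
  public void resetExitState() {
    // Without the reset, an ExitException recorded by one test would make
    // every later test in the suite appear to have exited.
    ExitUtil.resetFirstExitException();
  }

  @Test
  public void testExpectedTermination() {
    ExitUtil.disableSystemExit();          // record exits instead of exiting
    try {
      ExitUtil.terminate(1, "simulated fatal condition");
      fail("terminate should have thrown when System.exit is disabled");
    } catch (ExitException ee) {
      assertEquals(1, ee.status);          // the recorded exit status
    }
    assertTrue(ExitUtil.terminateCalled());
  }
}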

+ 10 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -248,4 +248,14 @@ public class TestLocalFileSystem {
     }
     assertEquals(1, fileSchemeCount);
   }
+
+  public void testHasFileDescriptor() throws IOException {
+    Configuration conf = new Configuration();
+    LocalFileSystem fs = FileSystem.getLocal(conf);
+    Path path = new Path(TEST_ROOT_DIR, "test-file");
+    writeFile(fs, path, 1);
+    BufferedFSInputStream bis = new BufferedFSInputStream(
+        new RawLocalFileSystem().new LocalFSFileInputStream(path), 1024);
+    assertNotNull(bis.getFileDescriptor());
+  }
 }

+ 11 - 5
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java

@@ -40,7 +40,9 @@ public class TestHAAdmin {
   
   private HAAdmin tool;
   private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
+  private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
   private String errOutput;
+  private String output;
 
   @Before
   public void setup() throws IOException {
@@ -53,12 +55,14 @@ public class TestHAAdmin {
     };
     tool.setConf(new Configuration());
     tool.errOut = new PrintStream(errOutBytes);
+    tool.out = new PrintStream(outBytes);
   }
   
   private void assertOutputContains(String string) {
-    if (!errOutput.contains(string)) {
-      fail("Expected output to contain '" + string + "' but was:\n" +
-          errOutput);
+    if (!errOutput.contains(string) && !output.contains(string)) {
+      fail("Expected output to contain '" + string + 
+          "' but err_output was:\n" + errOutput + 
+          "\n and output was: \n" + output);
     }
   }
   
@@ -88,17 +92,19 @@ public class TestHAAdmin {
 
   @Test
   public void testHelp() throws Exception {
-    assertEquals(-1, runTool("-help"));
+    assertEquals(0, runTool("-help"));
     assertEquals(0, runTool("-help", "transitionToActive"));
     assertOutputContains("Transitions the service into Active");
   }
 
   private Object runTool(String ... args) throws Exception {
     errOutBytes.reset();
+    outBytes.reset();
     LOG.info("Running: HAAdmin " + Joiner.on(" ").join(args));
     int ret = tool.run(args);
     errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
-    LOG.info("Output:\n" + errOutput);
+    output = new String(outBytes.toByteArray(), Charsets.UTF_8);
+    LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
     return ret;
   }
 }

+ 11 - 3
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestSSLHttpServer.java

@@ -41,6 +41,8 @@ import java.net.URL;
  * corresponding HTTPS URL.
  */
 public class TestSSLHttpServer extends HttpServerFunctionalTest {
+  private static final String CONFIG_SITE_XML = "sslhttpserver-site.xml";
+
   private static final String BASEDIR =
       System.getProperty("test.build.dir", "target/test-dir") + "/" +
       TestSSLHttpServer.class.getSimpleName();
@@ -49,8 +51,10 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
   private static HttpServer server;
   private static URL baseUrl;
 
+
   @Before
   public void setup() throws Exception {
+    HttpConfig.setSecure(true);
     File base = new File(BASEDIR);
     FileUtil.fullyDelete(base);
     base.mkdirs();
@@ -66,11 +70,12 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
     //we do this trick because the MR AppMaster is started in another VM and
     //the HttpServer configuration is not loaded from the job.xml but from the
     //site.xml files in the classpath
-    Writer writer = new FileWriter(classpathDir + "/core-site.xml");
+    Writer writer = new FileWriter(new File(classpathDir, CONFIG_SITE_XML));
     conf.writeXml(writer);
     writer.close();
 
     conf.setInt(HttpServer.HTTP_MAX_THREADS, 10);
+    conf.addResource(CONFIG_SITE_XML);
     server = createServer("test", conf);
     server.addServlet("echo", "/echo", TestHttpServer.EchoServlet.class);
     server.start();
@@ -83,7 +88,8 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
     server.stop();
     String classpathDir =
         KeyStoreTestUtil.getClasspathDir(TestSSLHttpServer.class);
-    new File(classpathDir + "/core-site.xml").delete();
+    new File(classpathDir, CONFIG_SITE_XML).delete();
+    HttpConfig.setSecure(false);
   }
   
 
@@ -98,7 +104,9 @@ public class TestSSLHttpServer extends HttpServerFunctionalTest {
   private static String readOut(URL url) throws Exception {
     StringBuilder out = new StringBuilder();
     HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
-    SSLFactory sslf = new SSLFactory(SSLFactory.Mode.CLIENT, new Configuration());
+    Configuration conf = new Configuration();
+    conf.addResource(CONFIG_SITE_XML);
+    SSLFactory sslf = new SSLFactory(SSLFactory.Mode.CLIENT, conf);
     sslf.init();
     conn.setSSLSocketFactory(sslf.createSSLSocketFactory());
     InputStream in = conn.getInputStream();

+ 30 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -114,6 +114,12 @@ Trunk (unreleased changes)
     HDFS-3789. JournalManager#format() should be able to throw IOException
     (Ivan Kelly via todd)
 
+    HDFS-3723. Add support -h, -help to all the commands. (Jing Zhao via
+    suresh)
+
+    HDFS-3803. Change BlockPoolSliceScanner chatty INFO log to DEBUG.
+    (Andrew Purtell via suresh)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -183,6 +189,8 @@ Trunk (unreleased changes)
     HDFS-3625. Fix TestBackupNode by properly initializing edit log during
     startup. (Junping Du via todd)
 
+    HDFS-3792. Fix two findbugs introduced by HDFS-3695 (todd)
+
 Branch-2 ( Unreleased changes )
 
   INCOMPATIBLE CHANGES
@@ -209,6 +217,8 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3637. Add support for encrypting the DataTransferProtocol. (atm)
 
+    HDFS-3150. Add option for clients to contact DNs via hostname. (eli)
+
   IMPROVEMENTS
 
     HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
@@ -381,6 +391,14 @@ Branch-2 ( Unreleased changes )
 
     HDFS-3276. initializeSharedEdits should have a -nonInteractive flag (todd)
 
+    HDFS-3765. namenode -initializeSharedEdits should be able to initialize
+    all shared storages. (Vinay and todd via todd)
+
+    HDFS-3802. StartupOption.name in HdfsServerConstants should be final.
+    (Jing Zhao via szetszwo)
+
+    HDFS-3796. Speed up edit log tests by avoiding fsync() (todd)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log
@@ -587,6 +605,15 @@ Branch-2 ( Unreleased changes )
     IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
     Rao G via szetszwo)
 
+    HDFS-3790. test_fuse_dfs.c doesn't compile on centos 5. (Colin Patrick
+    McCabe via atm)
+
+    HDFS-3658. Fix bugs in TestDFSClientRetries and add more tests.  (szetszwo)
+
+    HDFS-3794. WebHDFS OPEN returns the incorrect Content-Length in the HTTP
+    header when offset is specified and length is omitted.
+    (Ravi Prakash via szetszwo)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
@@ -1462,6 +1489,9 @@ Release 0.23.3 - UNRELEASED
 
     HDFS-3553. Hftp proxy tokens are broken (daryn)
 
+    HDFS-3718. Datanode won't shutdown because of runaway DataBlockScanner
+    thread (Kihwal Lee via daryn)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 108 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/test/java/org/apache/hadoop/contrib/bkjournal/TestBookKeeperAsHASharedDir.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.contrib.bkjournal;
 
 import static org.junit.Assert.*;
+
 import org.junit.Test;
 import org.junit.Before;
 import org.junit.After;
@@ -25,6 +26,9 @@ import org.junit.BeforeClass;
 import org.junit.AfterClass;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.ha.HAServiceProtocol.RequestSource;
+import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 
 import org.apache.hadoop.hdfs.HAUtil;
@@ -35,12 +39,16 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 
 import org.apache.hadoop.ipc.RemoteException;
 
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 
 import org.apache.bookkeeper.proto.BookieServer;
@@ -48,7 +56,9 @@ import org.apache.bookkeeper.proto.BookieServer;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import java.io.File;
 import java.io.IOException;
+import java.net.URISyntaxException;
 
 /**
  * Integration test to ensure that the BookKeeper JournalManager
@@ -67,6 +77,11 @@ public class TestBookKeeperAsHASharedDir {
     bkutil = new BKJMUtil(numBookies);
     bkutil.start();
   }
+  
+  @Before
+  public void clearExitStatus() {
+    ExitUtil.resetFirstExitException();
+  }
 
   @AfterClass
   public static void teardownBookkeeper() throws Exception {
@@ -244,4 +259,97 @@ public class TestBookKeeperAsHASharedDir {
       }
     }
   }
+  
+  /**
+   * Use NameNode INTIALIZESHAREDEDITS to initialize the shared edits. i.e. copy
+   * the edits log segments to new bkjm shared edits.
+   * 
+   * @throws Exception
+   */
+  @Test
+  public void testInitializeBKSharedEdits() throws Exception {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      HAUtil.setAllowStandbyReads(conf, true);
+      conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
+
+      MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
+      cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
+          .numDataNodes(0).build();
+      cluster.waitActive();
+      // Shutdown and clear the current filebased shared dir.
+      cluster.shutdownNameNodes();
+      File shareddir = new File(cluster.getSharedEditsDir(0, 1));
+      assertTrue("Initial Shared edits dir not fully deleted",
+          FileUtil.fullyDelete(shareddir));
+
+      // Check namenodes should not start without shared dir.
+      assertCanNotStartNamenode(cluster, 0);
+      assertCanNotStartNamenode(cluster, 1);
+
+      // Configure bkjm as new shared edits dir in both namenodes
+      Configuration nn1Conf = cluster.getConfiguration(0);
+      Configuration nn2Conf = cluster.getConfiguration(1);
+      nn1Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
+          .createJournalURI("/initializeSharedEdits").toString());
+      nn2Conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
+          .createJournalURI("/initializeSharedEdits").toString());
+      BKJMUtil.addJournalManagerDefinition(nn1Conf);
+      BKJMUtil.addJournalManagerDefinition(nn2Conf);
+
+      // Initialize the BKJM shared edits.
+      assertFalse(NameNode.initializeSharedEdits(nn1Conf));
+
+      // NameNode should be able to start and should be in sync with BKJM as
+      // shared dir
+      assertCanStartHANameNodes(cluster, conf, "/testBKJMInitialize");
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+  private void assertCanNotStartNamenode(MiniDFSCluster cluster, int nnIndex) {
+    try {
+      cluster.restartNameNode(nnIndex, false);
+      fail("Should not have been able to start NN" + (nnIndex)
+          + " without shared dir");
+    } catch (IOException ioe) {
+      LOG.info("Got expected exception", ioe);
+      GenericTestUtils.assertExceptionContains(
+          "Cannot start an HA namenode with name dirs that need recovery", ioe);
+    }
+  }
+
+  private void assertCanStartHANameNodes(MiniDFSCluster cluster,
+      Configuration conf, String path) throws ServiceFailedException,
+      IOException, URISyntaxException, InterruptedException {
+    // Now should be able to start both NNs. Pass "false" here so that we don't
+    // try to waitActive on all NNs, since the second NN doesn't exist yet.
+    cluster.restartNameNode(0, false);
+    cluster.restartNameNode(1, true);
+
+    // Make sure HA is working.
+    cluster
+        .getNameNode(0)
+        .getRpcServer()
+        .transitionToActive(
+            new StateChangeRequestInfo(RequestSource.REQUEST_BY_USER));
+    FileSystem fs = null;
+    try {
+      Path newPath = new Path(path);
+      fs = HATestUtil.configureFailoverFs(cluster, conf);
+      assertTrue(fs.mkdirs(newPath));
+      HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
+          cluster.getNameNode(1));
+      assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
+          newPath.toString(), false).isDir());
+    } finally {
+      if (fs != null) {
+        fs.close();
+      }
+    }
+  }
 }

+ 10 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java

@@ -86,11 +86,11 @@ class BlockReaderLocal implements BlockReader {
     }
 
     private synchronized ClientDatanodeProtocol getDatanodeProxy(
-        DatanodeInfo node, Configuration conf, int socketTimeout)
-        throws IOException {
+        DatanodeInfo node, Configuration conf, int socketTimeout,
+        boolean connectToDnViaHostname) throws IOException {
       if (proxy == null) {
         proxy = DFSUtil.createClientDatanodeProtocolProxy(node, conf,
-            socketTimeout);
+            socketTimeout, connectToDnViaHostname);
       }
       return proxy;
     }
@@ -156,14 +156,16 @@ class BlockReaderLocal implements BlockReader {
    */
   static BlockReaderLocal newBlockReader(Configuration conf, String file,
       ExtendedBlock blk, Token<BlockTokenIdentifier> token, DatanodeInfo node,
-      int socketTimeout, long startOffset, long length) throws IOException {
+      int socketTimeout, long startOffset, long length,
+      boolean connectToDnViaHostname) throws IOException {
 
     LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node
         .getIpcPort());
     // check the cache first
     BlockLocalPathInfo pathinfo = localDatanodeInfo.getBlockLocalPathInfo(blk);
     if (pathinfo == null) {
-      pathinfo = getBlockPathInfo(blk, node, conf, socketTimeout, token);
+      pathinfo = getBlockPathInfo(blk, node, conf, socketTimeout, token,
+          connectToDnViaHostname);
     }
 
     // check to see if the file exists. It may so happen that the
@@ -241,11 +243,12 @@ class BlockReaderLocal implements BlockReader {
   
   private static BlockLocalPathInfo getBlockPathInfo(ExtendedBlock blk,
       DatanodeInfo node, Configuration conf, int timeout,
-      Token<BlockTokenIdentifier> token) throws IOException {
+      Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname)
+          throws IOException {
     LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
     BlockLocalPathInfo pathinfo = null;
     ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(node,
-        conf, timeout);
+        conf, timeout, connectToDnViaHostname);
     try {
       // make RPC to local datanode to find local pathnames of blocks
       pathinfo = proxy.getBlockLocalPathInfo(blk, token);

+ 25 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -49,6 +49,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_DEF
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
 
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -213,6 +215,7 @@ public class DFSClient implements java.io.Closeable {
     final String taskId;
     final FsPermission uMask;
     final boolean useLegacyBlockReader;
+    final boolean connectToDnViaHostname;
 
     Conf(Configuration conf) {
       maxFailoverAttempts = conf.getInt(
@@ -263,6 +266,8 @@ public class DFSClient implements java.io.Closeable {
       useLegacyBlockReader = conf.getBoolean(
           DFS_CLIENT_USE_LEGACY_BLOCKREADER,
           DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
+      connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
+          DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
     }
 
     private int getChecksumType(Configuration conf) {
@@ -473,6 +478,14 @@ public class DFSClient implements java.io.Closeable {
     return clientName;
   }
 
+  /**
+   * @return whether the client should use hostnames instead of IPs
+   *    when connecting to DataNodes
+   */
+  boolean connectToDnViaHostname() {
+    return dfsClientConf.connectToDnViaHostname;
+  }
+
   void checkOpen() throws IOException {
     if (!clientRunning) {
       IOException result = new IOException("Filesystem closed");
@@ -729,12 +742,12 @@ public class DFSClient implements java.io.Closeable {
    */
   static BlockReader getLocalBlockReader(Configuration conf,
       String src, ExtendedBlock blk, Token<BlockTokenIdentifier> accessToken,
-      DatanodeInfo chosenNode, int socketTimeout, long offsetIntoBlock)
-      throws InvalidToken, IOException {
+      DatanodeInfo chosenNode, int socketTimeout, long offsetIntoBlock,
+      boolean connectToDnViaHostname) throws InvalidToken, IOException {
     try {
       return BlockReaderLocal.newBlockReader(conf, src, blk, accessToken,
           chosenNode, socketTimeout, offsetIntoBlock, blk.getNumBytes()
-              - offsetIntoBlock);
+              - offsetIntoBlock, connectToDnViaHostname);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(InvalidToken.class,
           AccessControlException.class);
@@ -1425,7 +1438,8 @@ public class DFSClient implements java.io.Closeable {
   public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
     checkOpen();
     return getFileChecksum(src, namenode, socketFactory,
-        dfsClientConf.socketTimeout, getDataEncryptionKey());
+        dfsClientConf.socketTimeout, getDataEncryptionKey(),
+        dfsClientConf.connectToDnViaHostname);
   }
   
   @InterfaceAudience.Private
@@ -1471,7 +1485,8 @@ public class DFSClient implements java.io.Closeable {
    */
   public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
       ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout,
-      DataEncryptionKey encryptionKey) throws IOException {
+      DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
+      throws IOException {
     //get all block locations
     LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
     if (null == blockLocations) {
@@ -1509,9 +1524,11 @@ public class DFSClient implements java.io.Closeable {
         try {
           //connect to a datanode
           sock = socketFactory.createSocket();
-          NetUtils.connect(sock,
-              NetUtils.createSocketAddr(datanodes[j].getXferAddr()),
-              timeout);
+          String dnAddr = datanodes[j].getXferAddr(connectToDnViaHostname);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Connecting to datanode " + dnAddr);
+          }
+          NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
           sock.setSoTimeout(timeout);
 
           OutputStream unbufOut = NetUtils.getOutputStream(sock);

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -52,6 +52,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT = "DEFAULT";
   public static final String  DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY = "dfs.client.socketcache.capacity";
   public static final int     DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT = 16;
+  public static final String  DFS_CLIENT_USE_DN_HOSTNAME = "dfs.client.use.datanode.hostname";
+  public static final boolean DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT = false;
 
   // HA related configuration
   public static final String  DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX = "dfs.client.failover.proxy.provider";
@@ -81,6 +83,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final boolean DFS_DATANODE_SYNC_BEHIND_WRITES_DEFAULT = false;
   public static final String  DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY = "dfs.datanode.drop.cache.behind.reads";
   public static final boolean DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT = false;
+  public static final String  DFS_DATANODE_USE_DN_HOSTNAME = "dfs.datanode.use.datanode.hostname";
+  public static final boolean DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT = false;
 
   public static final String  DFS_NAMENODE_HTTP_PORT_KEY = "dfs.http.port";
   public static final int     DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
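HDFS-3150 threads these keys through the client and the DataNode; both default to false. Enabling the client side is a one-line Configuration change. A minimal sketch (the NameNode URI and file path are illustrative):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HostnameClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Dial DataNodes by hostname instead of the IP they registered with,
    // e.g. on multihomed clusters where the registered IP is unreachable.
    conf.setBoolean("dfs.client.use.datanode.hostname", true);
    FileSystem fs = FileSystem.get(URI.create("hdfs://nn.example.com:8020"), conf);
    fs.open(new Path("/user/example/file")).close();
    fs.close();
  }
}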

+ 11 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -199,7 +199,8 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
       
       try {
         cdp = DFSUtil.createClientDatanodeProtocolProxy(
-        datanode, dfsClient.conf, dfsClient.getConf().socketTimeout, locatedblock);
+            datanode, dfsClient.conf, dfsClient.getConf().socketTimeout,
+            dfsClient.getConf().connectToDnViaHostname, locatedblock);
         
         final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
         
@@ -716,8 +717,12 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
       DatanodeInfo[] nodes = block.getLocations();
       try {
         DatanodeInfo chosenNode = bestNode(nodes, deadNodes);
-        InetSocketAddress targetAddr = 
-          NetUtils.createSocketAddr(chosenNode.getXferAddr());
+        final String dnAddr =
+            chosenNode.getXferAddr(dfsClient.connectToDnViaHostname());
+        if (DFSClient.LOG.isDebugEnabled()) {
+          DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
+        }
+        InetSocketAddress targetAddr = NetUtils.createSocketAddr(dnAddr);
         return new DNAddrPair(chosenNode, targetAddr);
       } catch (IOException ie) {
         String blockInfo = block.getBlock() + " file=" + src;
@@ -875,7 +880,8 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
     
     if (dfsClient.shouldTryShortCircuitRead(dnAddr)) {
       return DFSClient.getLocalBlockReader(dfsClient.conf, src, block,
-          blockToken, chosenNode, dfsClient.hdfsTimeout, startOffset);
+          blockToken, chosenNode, dfsClient.hdfsTimeout, startOffset,
+          dfsClient.connectToDnViaHostname());
     }
     
     IOException err = null;
@@ -1183,7 +1189,7 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
     throw new IOException("No live nodes contain current block");
   }
 
-  /** Utility class to encapsulate data node info and its ip address. */
+  /** Utility class to encapsulate data node info and its address. */
   static class DNAddrPair {
     DatanodeInfo info;
     InetSocketAddress addr;

+ 7 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -1100,7 +1100,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
           if (ie instanceof InvalidEncryptionKeyException && refetchEncryptionKey > 0) {
             DFSClient.LOG.info("Will fetch a new encryption key and retry, " 
                 + "encryption key was invalid when connecting to "
-                + nodes[0].getXferAddr() + " : " + ie);
+                + nodes[0] + " : " + ie);
             // The encryption key used is invalid.
             refetchEncryptionKey--;
             dfsClient.clearDataEncryptionKey();
@@ -1112,7 +1112,8 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
           // find the datanode that matches
           if (firstBadLink.length() != 0) {
             for (int i = 0; i < nodes.length; i++) {
-              if (nodes[i].getXferAddr().equals(firstBadLink)) {
+              // NB: Unconditionally using the xfer addr w/o hostname
+              if (firstBadLink.equals(nodes[i].getXferAddr())) {
                 errorIndex = i;
                 break;
               }
@@ -1216,11 +1217,11 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
    */
   static Socket createSocketForPipeline(final DatanodeInfo first,
       final int length, final DFSClient client) throws IOException {
-    if(DFSClient.LOG.isDebugEnabled()) {
-      DFSClient.LOG.debug("Connecting to datanode " + first);
+    final String dnAddr = first.getXferAddr(client.connectToDnViaHostname());
+    if (DFSClient.LOG.isDebugEnabled()) {
+      DFSClient.LOG.debug("Connecting to datanode " + dnAddr);
     }
-    final InetSocketAddress isa =
-      NetUtils.createSocketAddr(first.getXferAddr());
+    final InetSocketAddress isa = NetUtils.createSocketAddr(dnAddr);
     final Socket sock = client.socketFactory.createSocket();
     final int timeout = client.getDatanodeReadTimeout(length);
     NetUtils.connect(sock, isa, client.getRandomLocalInterfaceAddr(), timeout);

+ 68 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -18,8 +18,21 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
+
 import java.io.IOException;
+import java.io.PrintStream;
 import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
 import java.net.URI;
@@ -33,10 +46,17 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.Set;
-import java.util.StringTokenizer;
 
 import javax.net.SocketFactory;
 
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.CommandLineParser;
+import org.apache.commons.cli.Option;
+import org.apache.commons.cli.Options;
+import org.apache.commons.cli.ParseException;
+import org.apache.commons.cli.PosixParser;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -57,8 +77,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.ToolRunner;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Lists;
@@ -424,7 +443,6 @@ public class DFSUtil {
    * 
    * @param conf configuration
    * @return list of InetSocketAddresses
-   * @throws IOException if no addresses are configured
    */
   public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
       Configuration conf) {
@@ -841,17 +859,17 @@ public class DFSUtil {
   /** Create a {@link ClientDatanodeProtocol} proxy */
   public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
       DatanodeID datanodeid, Configuration conf, int socketTimeout,
-      LocatedBlock locatedBlock) throws IOException {
+      boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
     return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf, socketTimeout,
-             locatedBlock);
+        connectToDnViaHostname, locatedBlock);
   }
   
   /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
   static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
-      DatanodeID datanodeid, Configuration conf, int socketTimeout)
-      throws IOException {
+      DatanodeID datanodeid, Configuration conf, int socketTimeout,
+      boolean connectToDnViaHostname) throws IOException {
     return new ClientDatanodeProtocolTranslatorPB(
-        datanodeid, conf, socketTimeout);
+        datanodeid, conf, socketTimeout, connectToDnViaHostname);
   }
   
   /** Create a {@link ClientDatanodeProtocol} proxy */
@@ -1073,4 +1091,44 @@ public class DFSUtil {
       return null;
     }
   }
+  
+  public static Options helpOptions = new Options();
+  public static Option helpOpt = new Option("h", "help", false,
+      "get help information");
+
+  static {
+    helpOptions.addOption(helpOpt);
+  }
+
+  /**
+   * Parse the arguments for commands
+   * 
+   * @param args the argument to be parsed
+   * @param helpDescription help information to be printed out
+   * @param out Printer
+   * @param printGenericCommandUsage whether to print the 
+   *              generic command usage defined in ToolRunner
+   * @return true when the argument matches help option, false if not
+   */
+  public static boolean parseHelpArgument(String[] args,
+      String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
+    if (args.length == 1) {
+      try {
+        CommandLineParser parser = new PosixParser();
+        CommandLine cmdLine = parser.parse(helpOptions, args);
+        if (cmdLine.hasOption(helpOpt.getOpt())
+            || cmdLine.hasOption(helpOpt.getLongOpt())) {
+          // should print out the help information
+          out.println(helpDescription + "\n");
+          if (printGenericCommandUsage) {
+            ToolRunner.printGenericCommandUsage(out);
+          }
+          return true;
+        }
+      } catch (ParseException pe) {
+        return false;
+      }
+    }
+    return false;
+  }
 }
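Tools then gate main() on this helper before handing the arguments to ToolRunner, exactly as the Balancer change below does. A minimal sketch of the calling convention (ExampleTool and its USAGE string are hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

public class ExampleTool extends Configured implements Tool {
  private static final String USAGE = "Usage: java ExampleTool [-someOption]";

  @Override
  public int run(String[] args) {
    return 0; // the tool's real work is elided
  }

  public static void main(String[] args) throws Exception {
    // Handles a lone -h or -help: prints USAGE plus the generic
    // ToolRunner usage, and leaves everything else to run().
    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
      System.exit(0);
    }
    System.exit(ToolRunner.run(new Configuration(), new ExampleTool(), args));
  }
}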

+ 24 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java

@@ -104,7 +104,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
   /**
    * @return IP:ipcPort string
    */
-  public String getIpcAddr() {
+  private String getIpcAddr() {
     return ipAddr + ":" + ipcPort;
   }
 
@@ -122,6 +122,29 @@ public class DatanodeID implements Comparable<DatanodeID> {
     return hostName + ":" + xferPort;
   }
 
+  /**
+   * @return hostname:ipcPort
+   */
+  private String getIpcAddrWithHostname() {
+    return hostName + ":" + ipcPort;
+  }
+
+  /**
+   * @param useHostname true to use the DN hostname, use the IP otherwise
+   * @return name:xferPort
+   */
+  public String getXferAddr(boolean useHostname) {
+    return useHostname ? getXferAddrWithHostname() : getXferAddr();
+  }
+
+  /**
+   * @param useHostname true to use the DN hostname, use the IP otherwise
+   * @return name:ipcPort
+   */
+  public String getIpcAddr(boolean useHostname) {
+    return useHostname ? getIpcAddrWithHostname() : getIpcAddr();
+  }
+
   /**
    * @return data storage ID.
    */
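The boolean overloads keep IP-based addressing as the default while making the hostname variants private, so every caller has to state its choice. A small sketch mirroring the client-side call sites elsewhere in this commit (the helper itself is illustrative):

import java.net.InetSocketAddress;

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.net.NetUtils;

public class AddrChoice {
  /**
   * Resolve the data-transfer target for a DataNode, honoring the
   * dfs.client.use.datanode.hostname setting threaded through DFSClient.
   */
  static InetSocketAddress xferTarget(DatanodeID dn, boolean connectToDnViaHostname) {
    // true -> hostName:xferPort, false -> ipAddr:xferPort
    String dnAddr = dn.getXferAddr(connectToDnViaHostname);
    return NetUtils.createSocketAddr(dnAddr);
  }
}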

+ 15 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java

@@ -73,10 +73,10 @@ public class ClientDatanodeProtocolTranslatorPB implements
       RefreshNamenodesRequestProto.newBuilder().build();
 
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
-      Configuration conf, int socketTimeout, LocatedBlock locatedBlock)
-      throws IOException {
+      Configuration conf, int socketTimeout, boolean connectToDnViaHostname,
+      LocatedBlock locatedBlock) throws IOException {
     rpcProxy = createClientDatanodeProtocolProxy( datanodeid, conf, 
-                  socketTimeout, locatedBlock);
+                  socketTimeout, connectToDnViaHostname, locatedBlock);
   }
   
   public ClientDatanodeProtocolTranslatorPB(InetSocketAddress addr,
@@ -90,11 +90,17 @@ public class ClientDatanodeProtocolTranslatorPB implements
    * @param datanodeid Datanode to connect to.
    * @param conf Configuration.
    * @param socketTimeout Socket timeout to use.
+   * @param connectToDnViaHostname connect to the Datanode using its hostname
    * @throws IOException
    */
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
-      Configuration conf, int socketTimeout) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
+      Configuration conf, int socketTimeout, boolean connectToDnViaHostname)
+      throws IOException {
+    final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
+    InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
+    }
     rpcProxy = createClientDatanodeProtocolProxy(addr,
         UserGroupInformation.getCurrentUser(), conf,
         NetUtils.getDefaultSocketFactory(conf), socketTimeout);
@@ -102,10 +108,11 @@ public class ClientDatanodeProtocolTranslatorPB implements
 
   static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
       DatanodeID datanodeid, Configuration conf, int socketTimeout,
-      LocatedBlock locatedBlock) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
+      boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
+    final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
+    InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
     if (LOG.isDebugEnabled()) {
-      LOG.debug("ClientDatanodeProtocol addr=" + addr);
+      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
     }
     
     // Since we're creating a new UserGroupInformation here, we know that no

+ 16 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.balancer;
 
+import static com.google.common.base.Preconditions.checkArgument;
 import static org.apache.hadoop.hdfs.protocol.HdfsProtoUtil.vintPrefixed;
 
 import java.io.BufferedInputStream;
@@ -26,6 +27,7 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.PrintStream;
 import java.net.Socket;
 import java.net.URI;
 import java.text.DateFormat;
@@ -68,7 +70,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
 import org.apache.hadoop.io.IOUtils;
@@ -79,7 +80,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import static com.google.common.base.Preconditions.checkArgument;
 
 /** <p>The balancer is a tool that balances disk space usage on an HDFS cluster
  * when some datanodes become full or when new empty nodes join the cluster.
@@ -189,6 +189,13 @@ public class Balancer {
    */
   public static final int MAX_NUM_CONCURRENT_MOVES = 5;
   
+  private static final String USAGE = "Usage: java "
+      + Balancer.class.getSimpleName()
+      + "\n\t[-policy <policy>]\tthe balancing policy: "
+      + BalancingPolicy.Node.INSTANCE.getName() + " or "
+      + BalancingPolicy.Pool.INSTANCE.getName()
+      + "\n\t[-threshold <threshold>]\tPercentage of disk capacity";
+  
   private final NameNodeConnector nnc;
   private final BalancingPolicy policy;
   private final double threshold;
@@ -1550,7 +1557,7 @@ public class Balancer {
             }
           }
         } catch(RuntimeException e) {
-          printUsage();
+          printUsage(System.err);
           throw e;
         }
       }
@@ -1558,13 +1565,8 @@ public class Balancer {
       return new Parameters(policy, threshold);
     }
 
-    private static void printUsage() {
-      System.out.println("Usage: java " + Balancer.class.getSimpleName());
-      System.out.println("    [-policy <policy>]\tthe balancing policy: "
-          + BalancingPolicy.Node.INSTANCE.getName() + " or " 
-          + BalancingPolicy.Pool.INSTANCE.getName());
-      System.out.println(
-          "    [-threshold <threshold>]\tPercentage of disk capacity");
+    private static void printUsage(PrintStream out) {
+      out.println(USAGE + "\n");
     }
   }
 
@@ -1573,6 +1575,10 @@ public class Balancer {
    * @param args Command line arguments
    */
   public static void main(String[] args) {
+    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+      System.exit(0);
+    }
+
     try {
       System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
     } catch (Throwable e) {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java

@@ -61,7 +61,7 @@ public final class HdfsServerConstants {
     FORCE("-force"),
     NONINTERACTIVE("-nonInteractive");
     
-    private String name = null;
+    private final String name;
     
     // Used only with format and upgrade options
     private String clusterId = null;

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -554,9 +554,11 @@ class BlockPoolSliceScanner {
   }
   
   private synchronized void startNewPeriod() {
-    LOG.info("Starting a new period : work left in prev period : "
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Starting a new period : work left in prev period : "
         + String.format("%.2f%%", totalBytesToScan == 0 ? 0
             : (bytesLeft * 100.0) / totalBytesToScan));
+    }
 
     // reset the byte counts :
     bytesLeft = totalBytesToScan;

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DNConf.java

@@ -55,7 +55,7 @@ class DNConf {
   final boolean dropCacheBehindReads;
   final boolean syncOnClose;
   final boolean encryptDataTransfer;
-  
+  final boolean connectToDnViaHostname;
 
   final long readaheadLength;
   final long heartBeatInterval;
@@ -97,7 +97,9 @@ class DNConf {
     dropCacheBehindReads = conf.getBoolean(
         DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,
         DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT);
-    
+    connectToDnViaHostname = conf.getBoolean(
+        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
+        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
     this.blockReportInterval = conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
     DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
     

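DNConf reads the new switch the same way as its existing booleans: once at startup, with a constant key and default. With the constants expanded to their literal values (the key and its false default match the hdfs-default.xml entry later in this change):

    import org.apache.hadoop.conf.Configuration;

    class DNConfSketch {
      final boolean connectToDnViaHostname;

      DNConfSketch(Configuration conf) {
        // DFS_DATANODE_USE_DN_HOSTNAME and its _DEFAULT expand to these literals
        connectToDnViaHostname =
            conf.getBoolean("dfs.datanode.use.datanode.hostname", false);
      }
    }
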
+ 33 - 22
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -46,6 +46,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOUR
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HTTPS_ENABLE_KEY;
+import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import java.io.BufferedOutputStream;
 import java.io.ByteArrayInputStream;
@@ -55,6 +56,7 @@ import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
@@ -98,8 +100,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ClientDatanodeProtocolService;
@@ -124,9 +126,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.common.Util;
-
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -171,9 +170,9 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
-import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
 
 /**********************************************************
@@ -230,6 +229,8 @@ public class DataNode extends Configured
         
   static final Log ClientTraceLog =
     LogFactory.getLog(DataNode.class.getName() + ".clienttrace");
+  
+  private static final String USAGE = "Usage: java DataNode [-rollback | -regular]";
 
   /**
    * Use {@link NetUtils#createSocketAddr(String)} instead.
@@ -276,6 +277,7 @@ public class DataNode extends Configured
   private Configuration conf;
 
   private final String userWithLocalPathAccess;
+  private boolean connectToDnViaHostname;
   ReadaheadPool readaheadPool;
 
   /**
@@ -296,8 +298,11 @@ public class DataNode extends Configured
            final SecureResources resources) throws IOException {
     super(conf);
 
-    this.userWithLocalPathAccess = conf
-        .get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
+    this.userWithLocalPathAccess =
+        conf.get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
+    this.connectToDnViaHostname = conf.getBoolean(
+        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
+        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
     try {
       hostName = getHostName(conf);
       LOG.info("Configured hostname is " + hostName);
@@ -878,7 +883,7 @@ public class DataNode extends Configured
   /**
    * NB: The datanode can perform data transfer on the streaming
    * address however clients are given the IPC IP address for data
-   * transfer, and that may be be a different address.
+   * transfer, and that may be a different address.
    * 
    * @return socket address for data transfer
    */
@@ -925,12 +930,12 @@ public class DataNode extends Configured
   }
 
   public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
-      DatanodeID datanodeid, final Configuration conf, final int socketTimeout)
-    throws IOException {
-    final InetSocketAddress addr =
-      NetUtils.createSocketAddr(datanodeid.getIpcAddr());
-    if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
-      InterDatanodeProtocol.LOG.debug("InterDatanodeProtocol addr=" + addr);
+      DatanodeID datanodeid, final Configuration conf, final int socketTimeout,
+      final boolean connectToDnViaHostname) throws IOException {
+    final String dnAddr = datanodeid.getIpcAddr(connectToDnViaHostname);
+    final InetSocketAddress addr = NetUtils.createSocketAddr(dnAddr);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Connecting to datanode " + dnAddr + " addr=" + addr);
     }
     final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
     try {
@@ -1061,6 +1066,7 @@ public class DataNode extends Configured
       }
     }
     
+    this.shouldRun = false;
     shutdownPeriodicScanners();
     
     if (infoServer != null) {
@@ -1074,7 +1080,6 @@ public class DataNode extends Configured
       ipcServer.stop();
     }
     
-    this.shouldRun = false;
     if (dataXceiverServer != null) {
       ((DataXceiverServer) this.dataXceiverServer.getRunnable()).kill();
       this.dataXceiverServer.interrupt();
@@ -1386,8 +1391,11 @@ public class DataNode extends Configured
       final boolean isClient = clientname.length() > 0;
       
       try {
-        InetSocketAddress curTarget = 
-          NetUtils.createSocketAddr(targets[0].getXferAddr());
+        final String dnAddr = targets[0].getXferAddr(connectToDnViaHostname);
+        InetSocketAddress curTarget = NetUtils.createSocketAddr(dnAddr);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Connecting to datanode " + dnAddr);
+        }
         sock = newSocket();
         NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
         sock.setSoTimeout(targets.length * dnConf.socketTimeout);
@@ -1534,7 +1542,7 @@ public class DataNode extends Configured
     }
     
     if (!parseArguments(args, conf)) {
-      printUsage();
+      printUsage(System.err);
       return null;
     }
     Collection<URI> dataDirs = getStorageDirs(conf);
@@ -1648,9 +1656,8 @@ public class DataNode extends Configured
         + xmitsInProgress.get() + "}";
   }
 
-  private static void printUsage() {
-    System.err.println("Usage: java DataNode");
-    System.err.println("           [-rollback]");
+  private static void printUsage(PrintStream out) {
+    out.println(USAGE + "\n");
   }
 
   /**
@@ -1735,6 +1742,10 @@ public class DataNode extends Configured
   }
   
   public static void main(String args[]) {
+    if (DFSUtil.parseHelpArgument(args, DataNode.USAGE, System.out, true)) {
+      System.exit(0);
+    }
+
     secureMain(args, null);
   }
 
@@ -1843,7 +1854,7 @@ public class DataNode extends Configured
         DatanodeRegistration bpReg = bpos.bpRegistration;
         InterDatanodeProtocol datanode = bpReg.equals(id)?
             this: DataNode.createInterDataNodeProtocolProxy(id, getConf(),
-                dnConf.socketTimeout);
+                dnConf.socketTimeout, dnConf.connectToDnViaHostname);
         ReplicaRecoveryInfo info = callInitReplicaRecovery(datanode, rBlock);
         if (info != null &&
             info.getGenerationStamp() >= block.getGenerationStamp() &&

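One easy-to-miss fix in DataNode.shutdown(): clearing shouldRun now happens before the info and IPC servers are stopped, so worker threads that poll the flag stop scheduling new work during the teardown window instead of after it. The ordering reduced to its essentials (a sketch, not the actual DataNode code):

    class ShutdownOrderSketch implements Runnable {
      private volatile boolean shouldRun = true;

      @Override
      public void run() {
        while (shouldRun) {
          doWorkSketch();    // one unit of periodic work
        }
      }

      void shutdown() {
        shouldRun = false;   // flip the flag first
        stopServersSketch(); // then tear down servers; loops are already winding down
      }

      private void doWorkSketch() { /* scan, heartbeat, ... */ }
      private void stopServersSketch() { /* info server, IPC server, ... */ }
    }
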
+ 14 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -86,7 +86,7 @@ class DataXceiver extends Receiver implements Runnable {
   private final DataNode datanode;
   private final DNConf dnConf;
   private final DataXceiverServer dataXceiverServer;
-
+  private final boolean connectToDnViaHostname;
   private long opStartTime; //the start time of receiving an Op
   private final SocketInputWrapper socketIn;
   private OutputStream socketOut;
@@ -113,6 +113,7 @@ class DataXceiver extends Receiver implements Runnable {
     this.isLocal = s.getInetAddress().equals(s.getLocalAddress());
     this.datanode = datanode;
     this.dataXceiverServer = dataXceiverServer;
+    this.connectToDnViaHostname = datanode.getDnConf().connectToDnViaHostname;
     remoteAddress = s.getRemoteSocketAddress().toString();
     localAddress = s.getLocalSocketAddress().toString();
 
@@ -404,7 +405,10 @@ class DataXceiver extends Receiver implements Runnable {
       if (targets.length > 0) {
         InetSocketAddress mirrorTarget = null;
         // Connect to backup machine
-        mirrorNode = targets[0].getXferAddr();
+        mirrorNode = targets[0].getXferAddr(connectToDnViaHostname);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Connecting to datanode " + mirrorNode);
+        }
         mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
         mirrorSock = datanode.newSocket();
         try {
@@ -457,7 +461,8 @@ class DataXceiver extends Receiver implements Runnable {
           if (isClient) {
             BlockOpResponseProto.newBuilder()
               .setStatus(ERROR)
-              .setFirstBadLink(mirrorNode)
+               // NB: Unconditionally using the xfer addr w/o hostname
+              .setFirstBadLink(targets[0].getXferAddr())
               .build()
               .writeDelimitedTo(replyOut);
             replyOut.flush();
@@ -729,8 +734,11 @@ class DataXceiver extends Receiver implements Runnable {
     
     try {
       // get the output stream to the proxy
-      InetSocketAddress proxyAddr =
-        NetUtils.createSocketAddr(proxySource.getXferAddr());
+      final String dnAddr = proxySource.getXferAddr(connectToDnViaHostname);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Connecting to datanode " + dnAddr);
+      }
+      InetSocketAddress proxyAddr = NetUtils.createSocketAddr(dnAddr);
       proxySock = datanode.newSocket();
       NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
       proxySock.setSoTimeout(dnConf.socketTimeout);
@@ -891,6 +899,7 @@ class DataXceiver extends Receiver implements Runnable {
             if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
               DatanodeRegistration dnR = 
                 datanode.getDNRegistrationForBP(blk.getBlockPoolId());
+              // NB: Unconditionally using the xfer addr w/o hostname
               resp.setFirstBadLink(dnR.getXferAddr());
             }
             resp.build().writeDelimitedTo(out);

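The connect-side call sites switch to getXferAddr(connectToDnViaHostname), while client-facing fields such as firstBadLink deliberately keep the hostname-free form, since a remote client may not be able to resolve datanode hostnames. The DatanodeID accessor pair presumably looks something like this (field names are illustrative; only the method names appear in the diff):

    class DatanodeIDSketch {
      private final String ipAddr;    // e.g. "10.0.0.5" (hypothetical)
      private final String hostName;  // e.g. "dn1.example.com" (hypothetical)
      private final int xferPort;

      DatanodeIDSketch(String ipAddr, String hostName, int xferPort) {
        this.ipAddr = ipAddr;
        this.hostName = hostName;
        this.xferPort = xferPort;
      }

      String getXferAddr() {                     // always IP-based
        return ipAddr + ":" + xferPort;
      }

      String getXferAddr(boolean useHostname) {  // honors the hostname switch
        return (useHostname ? hostName : ipAddr) + ":" + xferPort;
      }
    }
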
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java

@@ -411,7 +411,7 @@ public class DatanodeWebHdfsMethods {
       }
       
       final long n = length.getValue() != null? length.getValue()
-          : in.getVisibleLength();
+        : in.getVisibleLength() - offset.getValue();
       return Response.ok(new OpenEntity(in, n, dfsclient)).type(
           MediaType.APPLICATION_OCTET_STREAM).build();
     }

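This one-line WebHDFS fix matters for ranged reads: when no explicit length is given, the response length must be what remains after the requested offset, not the whole visible length. A worked example with hypothetical numbers:

    public class OpenLengthExample {
      public static void main(String[] args) {
        long visibleLength = 100L;  // bytes visible in the file
        long offset = 40L;          // client-requested starting offset
        long n = visibleLength - offset;
        System.out.println(n);      // 60 bytes remain, not 100
      }
    }
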
+ 16 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java

@@ -49,6 +49,8 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
   private EditsDoubleBuffer doubleBuf;
   static ByteBuffer fill = ByteBuffer.allocateDirect(MIN_PREALLOCATION_LENGTH);
 
+  private static boolean shouldSkipFsyncForTests = false;
+
   static {
     fill.position(0);
     for (int i = 0; i < fill.capacity(); i++) {
@@ -184,7 +186,9 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
     }
     preallocate(); // preallocate file if necessary
     doubleBuf.flushTo(fp);
-    fc.force(false); // metadata updates not needed
+    if (!shouldSkipFsyncForTests) {
+      fc.force(false); // metadata updates not needed
+    }
   }
 
   /**
@@ -247,4 +251,15 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
   public FileChannel getFileChannelForTesting() {
     return fc;
   }
+  
+  /**
+   * For the purposes of unit tests, we don't need to actually
+   * write durably to disk. So, we can skip the fsync() calls
+   * for a speed improvement.
+   * @param skip true if fsync should <em>not</em> be called
+   */
+  @VisibleForTesting
+  public static void setShouldSkipFsyncForTesting(boolean skip) {
+    shouldSkipFsyncForTests = skip;
+  }
 }

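A sketch of how a test harness can use the new hook: enable it before edit-log-heavy tests and restore it afterwards. The setter is real per the hunk above; the surrounding scaffolding is assumed:

    import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;

    public class SkipFsyncInTests {
      public static void main(String[] args) {
        EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
        try {
          // ... run tests that write many edit log transactions ...
        } finally {
          // restore durable behavior for anything that runs afterwards
          EditLogFileOutputStream.setShouldSkipFsyncForTesting(false);
        }
      }
    }
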
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -341,7 +341,7 @@ public class FSEditLog  {
    * File-based journals are skipped, since they are formatted by the
    * Storage format code.
    */
-  void formatNonFileJournals(NamespaceInfo nsInfo) throws IOException {
+  synchronized void formatNonFileJournals(NamespaceInfo nsInfo) throws IOException {
     Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
         "Bad state: %s", state);
     
@@ -352,7 +352,7 @@ public class FSEditLog  {
     }
   }
   
-  List<FormatConfirmable> getFormatConfirmables() {
+  synchronized List<FormatConfirmable> getFormatConfirmables() {
     Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS,
         "Bad state: %s", state);
 

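Adding synchronized to these two methods closes a small race: both read the shared journal state and must observe it consistently with the other synchronized methods that mutate it. The shape of the guard in isolation:

    class EditLogStateSketch {
      enum State { BETWEEN_LOG_SEGMENTS, IN_SEGMENT }

      private State state = State.BETWEEN_LOG_SEGMENTS;

      synchronized void formatNonFileJournalsSketch() {
        if (state != State.BETWEEN_LOG_SEGMENTS) {
          throw new IllegalStateException("Bad state: " + state);
        }
        // ... format each non-file journal ...
      }
    }
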
+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -3742,6 +3742,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       this.extension = conf.getInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
       this.safeReplication = conf.getInt(DFS_NAMENODE_REPLICATION_MIN_KEY, 
                                          DFS_NAMENODE_REPLICATION_MIN_DEFAULT);
+      
+      LOG.info(DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY + " = " + threshold);
+      LOG.info(DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY + " = " + datanodeThreshold);
+      LOG.info(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + "     = " + extension);
+
       // default to safe mode threshold (i.e., don't populate queues before leaving safe mode)
       this.replQueueThreshold = 
         conf.getFloat(DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY,

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java

@@ -127,7 +127,7 @@ public class FileChecksumServlets {
             datanode, conf, getUGI(request, conf));
         final ClientProtocol nnproxy = dfs.getNamenode();
         final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
-            path, nnproxy, socketFactory, socketTimeout, dfs.getDataEncryptionKey());
+            path, nnproxy, socketFactory, socketTimeout, dfs.getDataEncryptionKey(), false);
         MD5MD5CRC32FileChecksum.write(xml, checksum);
       } catch(IOException ioe) {
         writeXml(ioe, path, xml);

+ 90 - 74
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -17,18 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
+import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.Iterator;
 import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -42,10 +37,12 @@ import org.apache.hadoop.ha.HealthCheckFailedException;
 import org.apache.hadoop.ha.ServiceFailedException;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Trash;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import static org.apache.hadoop.util.ExitUtil.terminate;
+import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -53,9 +50,6 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
-import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
 import org.apache.hadoop.hdfs.server.namenode.ha.ActiveState;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
@@ -68,8 +62,6 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
-import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
@@ -80,14 +72,12 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
+import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.util.ExitUtil.ExitException;
-
-import static org.apache.hadoop.util.ExitUtil.terminate;
-import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
 /**********************************************************
@@ -198,6 +188,22 @@ public class NameNode {
     DFS_HA_AUTO_FAILOVER_ENABLED_KEY
   };
   
+  private static final String USAGE = "Usage: java NameNode ["
+      + StartupOption.BACKUP.getName() + "] | ["
+      + StartupOption.CHECKPOINT.getName() + "] | ["
+      + StartupOption.FORMAT.getName() + " ["
+      + StartupOption.CLUSTERID.getName() + " cid ] ["
+      + StartupOption.FORCE.getName() + "] ["
+      + StartupOption.NONINTERACTIVE.getName() + "] ] | ["
+      + StartupOption.UPGRADE.getName() + "] | ["
+      + StartupOption.ROLLBACK.getName() + "] | ["
+      + StartupOption.FINALIZE.getName() + "] | ["
+      + StartupOption.IMPORT.getName() + "] | ["
+      + StartupOption.INITIALIZESHAREDEDITS.getName() + "] | ["
+      + StartupOption.BOOTSTRAPSTANDBY.getName() + "] | ["
+      + StartupOption.RECOVER.getName() + " [ " + StartupOption.FORCE.getName()
+      + " ] ]";
+  
   public long getProtocolVersion(String protocol, 
                                  long clientVersion) throws IOException {
     if (protocol.equals(ClientProtocol.class.getName())) {
@@ -767,9 +773,18 @@ public class NameNode {
     String nsId = DFSUtil.getNamenodeNameServiceId(conf);
     String namenodeId = HAUtil.getNameNodeId(conf, nsId);
     initializeGenericKeys(conf, nsId, namenodeId);
+    
+    if (conf.get(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY) == null) {
+      LOG.fatal("No shared edits directory configured for namespace " +
+          nsId + " namenode " + namenodeId);
+      return false;
+    }
+
     NNStorage existingStorage = null;
     try {
-      FSNamesystem fsns = FSNamesystem.loadFromDisk(conf,
+      Configuration confWithoutShared = new Configuration(conf);
+      confWithoutShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
+      FSNamesystem fsns = FSNamesystem.loadFromDisk(confWithoutShared,
           FSNamesystem.getNamespaceDirs(conf),
           FSNamesystem.getNamespaceEditsDirs(conf, false));
       
@@ -799,11 +814,9 @@ public class NameNode {
       fsns.getFSImage().getEditLog().close();
       fsns.getFSImage().getEditLog().initJournalsForWrite();
       fsns.getFSImage().getEditLog().recoverUnclosedStreams();
-      
-      if (copyEditLogSegmentsToSharedDir(fsns, sharedEditsDirs,
-          newSharedStorage, conf)) {
-        return true; // aborted
-      }
+
+      copyEditLogSegmentsToSharedDir(fsns, sharedEditsDirs, newSharedStorage,
+          conf);
     } catch (IOException ioe) {
       LOG.error("Could not initialize shared edits dir", ioe);
       return true; // aborted
@@ -821,43 +834,59 @@ public class NameNode {
     }
     return false; // did not abort
   }
-  
-  private static boolean copyEditLogSegmentsToSharedDir(FSNamesystem fsns,
+
+  private static void copyEditLogSegmentsToSharedDir(FSNamesystem fsns,
       Collection<URI> sharedEditsDirs, NNStorage newSharedStorage,
-      Configuration conf) throws FileNotFoundException, IOException {
+      Configuration conf) throws IOException {
+    Preconditions.checkArgument(!sharedEditsDirs.isEmpty(),
+        "No shared edits specified");
     // Copy edit log segments into the new shared edits dir.
-    for (JournalAndStream jas : fsns.getFSImage().getEditLog().getJournals()) {
-      FileJournalManager fjm = null;
-      if (!(jas.getManager() instanceof FileJournalManager)) {
-        LOG.error("Cannot populate shared edits dir from non-file " +
-            "journal manager: " + jas.getManager());
-        return true; // aborted
-      } else {
-        fjm = (FileJournalManager) jas.getManager();
-      }
-      for (EditLogFile elf : fjm.getLogFiles(fsns.getFSImage()
-          .getMostRecentCheckpointTxId())) {
-        File editLogSegment = elf.getFile();
-        for (URI sharedEditsUri : sharedEditsDirs) {
-          StorageDirectory sharedEditsDir = newSharedStorage
-              .getStorageDirectory(sharedEditsUri);
-          File targetFile = new File(sharedEditsDir.getCurrentDir(),
-              editLogSegment.getName());
-          if (!targetFile.exists()) {
-            InputStream in = null;
-            OutputStream out = null;
-            try {
-              in = new FileInputStream(editLogSegment);
-              out = new AtomicFileOutputStream(targetFile);
-              IOUtils.copyBytes(in, out, conf);
-            } finally {
-              IOUtils.cleanup(LOG, in, out);
-            }
-          }
+    List<URI> sharedEditsUris = new ArrayList<URI>(sharedEditsDirs);
+    FSEditLog newSharedEditLog = new FSEditLog(conf, newSharedStorage,
+        sharedEditsUris);
+    newSharedEditLog.initJournalsForWrite();
+    newSharedEditLog.recoverUnclosedStreams();
+    
+    FSEditLog sourceEditLog = fsns.getFSImage().editLog;
+    
+    long fromTxId = fsns.getFSImage().getMostRecentCheckpointTxId();
+    Collection<EditLogInputStream> streams = sourceEditLog.selectInputStreams(
+        fromTxId+1, 0);
+
+    // Set the nextTxid to the CheckpointTxId+1
+    newSharedEditLog.setNextTxId(fromTxId + 1);
+    
+    // Copy all edits after last CheckpointTxId to shared edits dir
+    for (EditLogInputStream stream : streams) {
+      LOG.debug("Beginning to copy stream " + stream + " to shared edits");
+      FSEditLogOp op;
+      boolean segmentOpen = false;
+      while ((op = stream.readOp()) != null) {
+        if (LOG.isTraceEnabled()) {
+          LOG.trace("copying op: " + op);
         }
+        if (!segmentOpen) {
+          newSharedEditLog.startLogSegment(op.txid, false);
+          segmentOpen = true;
+        }
+        
+        newSharedEditLog.logEdit(op);
+
+        if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) {
+          newSharedEditLog.logSync();
+          newSharedEditLog.endCurrentLogSegment(false);
+          LOG.debug("ending log segment because of END_LOG_SEGMENT op in " + stream);
+          segmentOpen = false;
+        }
+      }
+      
+      if (segmentOpen) {
+        LOG.debug("ending log segment because of end of stream in " + stream);
+        newSharedEditLog.logSync();
+        newSharedEditLog.endCurrentLogSegment(false);
+        segmentOpen = false;
       }
     }
-    return false; // did not abort
   }
 
   private static boolean finalize(Configuration conf,
@@ -882,25 +911,8 @@ public class NameNode {
     return false;
   }
 
-  private static void printUsage() {
-    System.err.println(
-      "Usage: java NameNode [" +
-      StartupOption.BACKUP.getName() + "] | [" +
-      StartupOption.CHECKPOINT.getName() + "] | [" +
-      StartupOption.FORMAT.getName() + " [" + StartupOption.CLUSTERID.getName() +  
-      " cid ] [" + StartupOption.FORCE.getName() + "] [" +
-      StartupOption.NONINTERACTIVE.getName() + "] ] | [" +
-      StartupOption.UPGRADE.getName() + "] | [" +
-      StartupOption.ROLLBACK.getName() + "] | [" +
-      StartupOption.FINALIZE.getName() + "] | [" +
-      StartupOption.IMPORT.getName() + "] | [" +
-      StartupOption.INITIALIZESHAREDEDITS.getName() + 
-        " [" + StartupOption.FORCE.getName() + "] [" +
-             StartupOption.NONINTERACTIVE.getName() + "]" +
-      "] | [" +
-      StartupOption.BOOTSTRAPSTANDBY.getName() + "] | [" + 
-      StartupOption.RECOVER.getName() + " [ " +
-        StartupOption.FORCE.getName() + " ] ]");
+  private static void printUsage(PrintStream out) {
+    out.println(USAGE + "\n");
   }
 
   private static StartupOption parseArguments(String args[]) {
@@ -1048,7 +1060,7 @@ public class NameNode {
       conf = new HdfsConfiguration();
     StartupOption startOpt = parseArguments(argv);
     if (startOpt == null) {
-      printUsage();
+      printUsage(System.err);
       return null;
     }
     setStartupOption(conf, startOpt);
@@ -1162,6 +1174,10 @@ public class NameNode {
   /**
    */
   public static void main(String argv[]) throws Exception {
+    if (DFSUtil.parseHelpArgument(argv, NameNode.USAGE, System.out, true)) {
+      System.exit(0);
+    }
+
     try {
       StringUtils.startupShutdownMessage(NameNode.class, argv, LOG);
       NameNode namenode = createNameNode(argv, null);

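Two things are going on in the initializeSharedEdits rework. First, replaying edits through a fresh FSEditLog, instead of copying segment files, works for journals that are not file-based at all, which is what the new TestBookKeeperAsHASharedDir in this merge exercises. Second, the namesystem is loaded from a copy of the configuration with the shared edits key unset, so the directory being populated is never also treated as a source journal. That second trick in isolation:

    import org.apache.hadoop.conf.Configuration;

    class WithoutSharedEditsSketch {
      static Configuration withoutSharedEdits(Configuration conf) {
        Configuration copy = new Configuration(conf); // caller's conf stays intact
        copy.unset("dfs.namenode.shared.edits.dir");  // DFS_NAMENODE_SHARED_EDITS_DIR_KEY
        return copy;
      }
    }
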
+ 23 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -562,6 +562,9 @@ public class SecondaryNameNode implements Runnable {
     if (opts == null) {
       LOG.fatal("Failed to parse options");
       terminate(1);
+    } else if (opts.shouldPrintHelp()) {
+      opts.usage();
+      System.exit(0);
     }
     
     StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
@@ -595,6 +598,7 @@ public class SecondaryNameNode implements Runnable {
     private final Option geteditsizeOpt;
     private final Option checkpointOpt;
     private final Option formatOpt;
+    private final Option helpOpt;
 
 
     Command cmd;
@@ -605,6 +609,7 @@ public class SecondaryNameNode implements Runnable {
     
     private boolean shouldForce;
     private boolean shouldFormat;
+    private boolean shouldPrintHelp;
 
     CommandLineOpts() {
       geteditsizeOpt = new Option("geteditsize",
@@ -612,20 +617,32 @@ public class SecondaryNameNode implements Runnable {
       checkpointOpt = OptionBuilder.withArgName("force")
        .hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");
       formatOpt = new Option("format", "format the local storage during startup");
+      helpOpt = new Option("h", "help", false, "get help information");
       
       options.addOption(geteditsizeOpt);
       options.addOption(checkpointOpt);
       options.addOption(formatOpt);
+      options.addOption(helpOpt);
     }
     
     public boolean shouldFormat() {
       return shouldFormat;
     }
 
+    public boolean shouldPrintHelp() {
+      return shouldPrintHelp;
+    }
+    
     public void parse(String ... argv) throws ParseException {
       CommandLineParser parser = new PosixParser();
       CommandLine cmdLine = parser.parse(options, argv);
       
+      if (cmdLine.hasOption(helpOpt.getOpt())
+          || cmdLine.hasOption(helpOpt.getLongOpt())) {
+        shouldPrintHelp = true;
+        return;
+      }
+      
       boolean hasGetEdit = cmdLine.hasOption(geteditsizeOpt.getOpt());
       boolean hasCheckpoint = cmdLine.hasOption(checkpointOpt.getOpt()); 
       if (hasGetEdit && hasCheckpoint) {
@@ -662,8 +679,13 @@ public class SecondaryNameNode implements Runnable {
     }
     
     void usage() {
+      String header = "The Secondary NameNode is a helper "
+          + "to the primary NameNode. The Secondary is responsible "
+          + "for supporting periodic checkpoints of the HDFS metadata. "
+          + "The current design allows only one Secondary NameNode "
+          + "per HDFS cluster.";
       HelpFormatter formatter = new HelpFormatter();
-      formatter.printHelp("secondarynamenode", options);
+      formatter.printHelp("secondarynamenode", header, options, "", false);
     }
   }
 

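The -h/--help wiring uses the commons-cli classes this file already imports. A self-contained miniature of the same flow:

    import org.apache.commons.cli.CommandLine;
    import org.apache.commons.cli.HelpFormatter;
    import org.apache.commons.cli.Option;
    import org.apache.commons.cli.Options;
    import org.apache.commons.cli.ParseException;
    import org.apache.commons.cli.PosixParser;

    public class HelpOptionSketch {
      public static void main(String[] args) throws ParseException {
        Options options = new Options();
        Option helpOpt = new Option("h", "help", false, "get help information");
        options.addOption(helpOpt);

        CommandLine cmdLine = new PosixParser().parse(options, args);
        if (cmdLine.hasOption(helpOpt.getOpt())
            || cmdLine.hasOption(helpOpt.getLongOpt())) {
          new HelpFormatter().printHelp("secondarynamenode", options);
          return;  // the real main() exits with status 0 here
        }
        // ... normal startup ...
      }
    }
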
+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java

@@ -42,6 +42,10 @@ public class DFSHAAdmin extends HAAdmin {
   protected void setErrOut(PrintStream errOut) {
     this.errOut = errOut;
   }
+  
+  protected void setOut(PrintStream out) {
+    this.out = out;
+  }
 
   @Override
   public void setConf(Configuration conf) {

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSZKFailoverController.java

@@ -162,6 +162,10 @@ public class DFSZKFailoverController extends ZKFailoverController {
 
   public static void main(String args[])
       throws Exception {
+    if (DFSUtil.parseHelpArgument(args, 
+        ZKFailoverController.USAGE, System.out, true)) {
+      System.exit(0);
+    }
     
     GenericOptionsParser parser = new GenericOptionsParser(
         new HdfsConfiguration(), args);

+ 32 - 25
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java

@@ -73,6 +73,25 @@ public class DFSck extends Configured implements Tool {
     HdfsConfiguration.init();
   }
 
+  private static final String USAGE = "Usage: DFSck <path> "
+      + "[-list-corruptfileblocks | "
+      + "[-move | -delete | -openforwrite] "
+      + "[-files [-blocks [-locations | -racks]]]]\n"
+      + "\t<path>\tstart checking from this path\n"
+      + "\t-move\tmove corrupted files to /lost+found\n"
+      + "\t-delete\tdelete corrupted files\n"
+      + "\t-files\tprint out files being checked\n"
+      + "\t-openforwrite\tprint out files opened for write\n"
+      + "\t-list-corruptfileblocks\tprint out list of missing "
+      + "blocks and files they belong to\n"
+      + "\t-blocks\tprint out block report\n"
+      + "\t-locations\tprint out locations for every block\n"
+      + "\t-racks\tprint out network topology for data-node locations\n"
+      + "\t\tBy default fsck ignores files opened for write, "
+      + "use -openforwrite to report such files. They are usually "
+      + "tagged CORRUPT or HEALTHY depending on their block "
+      + "allocation status";
+  
   private final UserGroupInformation ugi;
   private final PrintStream out;
 
@@ -93,25 +112,9 @@ public class DFSck extends Configured implements Tool {
   /**
    * Print fsck usage information
    */
-  static void printUsage() {
-    System.err.println("Usage: DFSck <path> [-list-corruptfileblocks | " +
-        "[-move | -delete | -openforwrite] " +
-        "[-files [-blocks [-locations | -racks]]]]");
-    System.err.println("\t<path>\tstart checking from this path");
-    System.err.println("\t-move\tmove corrupted files to /lost+found");
-    System.err.println("\t-delete\tdelete corrupted files");
-    System.err.println("\t-files\tprint out files being checked");
-    System.err.println("\t-openforwrite\tprint out files opened for write");
-    System.err.println("\t-list-corruptfileblocks\tprint out list of missing "
-        + "blocks and files they belong to");
-    System.err.println("\t-blocks\tprint out block report");
-    System.err.println("\t-locations\tprint out locations for every block");
-    System.err.println("\t-racks\tprint out network topology for data-node locations");
-    System.err.println("\t\tBy default fsck ignores files opened for write, " +
-                       "use -openforwrite to report such files. They are usually " +
-                       " tagged CORRUPT or HEALTHY depending on their block " +
-                        "allocation status");
-    ToolRunner.printGenericCommandUsage(System.err);
+  static void printUsage(PrintStream out) {
+    out.println(USAGE + "\n");
+    ToolRunner.printGenericCommandUsage(out);
   }
   /**
    * @param args
@@ -119,7 +122,7 @@ public class DFSck extends Configured implements Tool {
   @Override
   public int run(final String[] args) throws IOException {
     if (args.length == 0) {
-      printUsage();
+      printUsage(System.err);
       return -1;
     }
 
@@ -258,12 +261,12 @@ public class DFSck extends Configured implements Tool {
         } else {
           System.err.println("fsck: can only operate on one path at a time '"
               + args[idx] + "'");
-          printUsage();
+          printUsage(System.err);
           return -1;
         }
       } else {
         System.err.println("fsck: Illegal option '" + args[idx] + "'");
-        printUsage();
+        printUsage(System.err);
         return -1;
       }
     }
@@ -304,10 +307,14 @@ public class DFSck extends Configured implements Tool {
     // -files option is also used by GenericOptionsParser
     // Make sure that is not the first argument for fsck
     int res = -1;
-    if ((args.length == 0 ) || ("-files".equals(args[0]))) 
-      printUsage();
-    else
+    if ((args.length == 0) || ("-files".equals(args[0]))) {
+      printUsage(System.err);
+      ToolRunner.printGenericCommandUsage(System.err);
+    } else if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+      res = 0;
+    } else {
       res = ToolRunner.run(new DFSck(new HdfsConfiguration()), args);
+    }
     System.exit(res);
   }
 }

+ 9 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java

@@ -40,7 +40,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.HftpFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -48,9 +47,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretMan
 import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
 import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
-import org.apache.hadoop.hdfs.web.URLUtils;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.SecurityUtil;
@@ -71,8 +68,10 @@ public class DelegationTokenFetcher {
   private static final String CANCEL = "cancel";
   private static final String RENEW = "renew";
   private static final String PRINT = "print";
+  private static final String HELP = "help";
+  private static final String HELP_SHORT = "h";
 
-  private static void printUsage(PrintStream err) throws IOException {
+  private static void printUsage(PrintStream err) {
     err.println("fetchdt retrieves delegation tokens from the NameNode");
     err.println();
     err.println("fetchdt <opts> <token file>");
@@ -107,6 +106,7 @@ public class DelegationTokenFetcher {
     fetcherOptions.addOption(CANCEL, false, "cancel the token");
     fetcherOptions.addOption(RENEW, false, "renew the token");
     fetcherOptions.addOption(PRINT, false, "print the token");
+    fetcherOptions.addOption(HELP_SHORT, HELP, false, "print out help information");
     GenericOptionsParser parser = new GenericOptionsParser(conf,
         fetcherOptions, args);
     CommandLine cmd = parser.getCommandLine();
@@ -119,9 +119,14 @@ public class DelegationTokenFetcher {
     final boolean cancel = cmd.hasOption(CANCEL);
     final boolean renew = cmd.hasOption(RENEW);
     final boolean print = cmd.hasOption(PRINT);
+    final boolean help = cmd.hasOption(HELP);
     String[] remaining = parser.getRemainingArgs();
 
     // check option validity
+    if (help) {
+      printUsage(System.out);
+      System.exit(0);
+    }
     if (cancel && renew || cancel && print || renew && print || cancel && renew
         && print) {
       System.err.println("ERROR: Only specify cancel, renew or print.");

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetConf.java

@@ -324,6 +324,10 @@ public class GetConf extends Configured implements Tool {
   }
 
   public static void main(String[] args) throws Exception {
+    if (DFSUtil.parseHelpArgument(args, USAGE, System.out, true)) {
+      System.exit(0);
+    }
+    
     int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
     System.exit(res);
   }

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/GetGroups.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.NameNodeProxies;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -43,6 +44,8 @@ import org.apache.hadoop.util.ToolRunner;
 public class GetGroups extends GetGroupsBase {
   
   private static final Log LOG = LogFactory.getLog(GetGroups.class);
+  
+  static final String USAGE = "Usage: hdfs groups [username ...]";
 
   static{
     HdfsConfiguration.init();
@@ -86,6 +89,10 @@ public class GetGroups extends GetGroupsBase {
   }
 
   public static void main(String[] argv) throws Exception {
+    if (DFSUtil.parseHelpArgument(argv, USAGE, System.out, true)) {
+      System.exit(0);
+    }
+    
     int res = ToolRunner.run(new GetGroups(new HdfsConfiguration()), argv);
     System.exit(res);
   }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/test/test_fuse_dfs.c

@@ -75,7 +75,7 @@ static int fuserMount(int *procRet, ...)
 {
   int ret, status;
   size_t i = 0;
-  char *args[64], *c, *env[] = { NULL };
+  char *args[64], *c;
   va_list ap;
   pid_t pid, pret;
 
@@ -99,7 +99,7 @@ static int fuserMount(int *procRet, ...)
             ret, strerror(ret));
     return -ret;
   } else if (pid == 0) {
-    if (execvpe("fusermount", args, env)) {
+    if (execvp("fusermount", args)) {
       ret = errno;
       fprintf(stderr, "FUSE_TEST: failed to execute fusermount: "
               "error %d: %s\n", ret, strerror(ret));

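The execvpe-to-execvp switch above is a portability fix: execvpe() is a GNU/glibc extension rather than POSIX, so it is unavailable on some platforms. Plain execvp() lets fusermount inherit the test's environment instead of the explicit empty one, which this test tolerates.
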
+ 17 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -53,7 +53,7 @@
   <name>dfs.datanode.address</name>
   <value>0.0.0.0:50010</value>
   <description>
-    The address where the datanode server will listen to.
+    The datanode server address and port for data transfer.
     If the port is 0 then the server will start on a free port.
   </description>
 </property>
@@ -925,6 +925,22 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.client.use.datanode.hostname</name>
+  <value>false</value>
+  <description>Whether clients should use datanode hostnames when
+    connecting to datanodes.
+  </description>
+</property>
+
+<property>
+  <name>dfs.datanode.use.datanode.hostname</name>
+  <value>false</value>
+  <description>Whether datanodes should use datanode hostnames when
+    connecting to other datanodes for data transfer.
+  </description>
+</property>
+
 <property>
   <name>dfs.client.local.interfaces</name>
   <value></value>

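To exercise the new switches from code rather than XML, a client or test can set the same keys programmatically (key names and defaults as documented above):

    import org.apache.hadoop.conf.Configuration;

    class UseHostnamesExample {
      static Configuration hostnameConf() {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.client.use.datanode.hostname", true);   // client -> DN
        conf.setBoolean("dfs.datanode.use.datanode.hostname", true); // DN -> DN
        return conf;
      }
    }
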
+ 47 - 12
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -118,6 +118,8 @@ public class MiniDFSCluster {
   public static final String PROP_TEST_BUILD_DATA = "test.build.data";
   /** Configuration option to set the data dir: {@value} */
   public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";
+  public static final String  DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY
+      = DFS_NAMENODE_SAFEMODE_EXTENSION_KEY + ".testing";
 
   static { DefaultMetricsSystem.setMiniClusterMode(true); }
 
@@ -143,6 +145,7 @@ public class MiniDFSCluster {
     private boolean setupHostsFile = false;
     private MiniDFSNNTopology nnTopology = null;
     private boolean checkExitOnShutdown = true;
+    private boolean checkDataNodeHostConfig = false;
     
     public Builder(Configuration conf) {
       this.conf = conf;
@@ -260,6 +263,14 @@ public class MiniDFSCluster {
       return this;
     }
 
+    /**
+     * Default: false
+     */
+    public Builder checkDataNodeHostConfig(boolean val) {
+      this.checkDataNodeHostConfig = val;
+      return this;
+    }
+    
     /**
      * Default: null
      */
@@ -324,7 +335,8 @@ public class MiniDFSCluster {
                        builder.waitSafeMode,
                        builder.setupHostsFile,
                        builder.nnTopology,
-                       builder.checkExitOnShutdown);
+                       builder.checkExitOnShutdown,
+                       builder.checkDataNodeHostConfig);
   }
   
   public class DataNodeProperties {
@@ -561,7 +573,7 @@ public class MiniDFSCluster {
         manageNameDfsDirs, true, manageDataDfsDirs, manageDataDfsDirs,
         operation, racks, hosts,
         simulatedCapacities, null, true, false,
-        MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true);
+        MiniDFSNNTopology.simpleSingleNN(nameNodePort, 0), true, false);
   }
 
   private void initMiniDFSCluster(
@@ -571,7 +583,8 @@ public class MiniDFSCluster {
       boolean manageDataDfsDirs, StartupOption operation, String[] racks,
       String[] hosts, long[] simulatedCapacities, String clusterId,
       boolean waitSafeMode, boolean setupHostsFile,
-      MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown)
+      MiniDFSNNTopology nnTopology, boolean checkExitOnShutdown,
+      boolean checkDataNodeHostConfig)
   throws IOException {
     ExitUtil.disableSystemExit();
 
@@ -587,7 +600,9 @@ public class MiniDFSCluster {
     
     int replication = conf.getInt(DFS_REPLICATION_KEY, 3);
     conf.setInt(DFS_REPLICATION_KEY, Math.min(replication, numDataNodes));
-    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, 0);
+    int safemodeExtension = conf.getInt(
+        DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 0);
+    conf.setInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, safemodeExtension);
     conf.setInt(DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 3); // 3 second
     conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, 
                    StaticMapping.class, DNSToSwitchMapping.class);
@@ -626,7 +641,7 @@ public class MiniDFSCluster {
 
     // Start the DataNodes
     startDataNodes(conf, numDataNodes, manageDataDfsDirs, operation, racks,
-        hosts, simulatedCapacities, setupHostsFile);
+        hosts, simulatedCapacities, setupHostsFile, false, checkDataNodeHostConfig);
     waitClusterUp();
     //make sure ProxyUsers uses the latest conf
     ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
@@ -978,7 +993,21 @@ public class MiniDFSCluster {
                              long[] simulatedCapacities,
                              boolean setupHostsFile) throws IOException {
     startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts,
-                   simulatedCapacities, setupHostsFile, false);
+        simulatedCapacities, setupHostsFile, false, false);
+  }
+
+  /**
+   * @see MiniDFSCluster#startDataNodes(Configuration, int, boolean, StartupOption,
+   * String[], String[], long[], boolean, boolean, boolean)
+   */
+  public synchronized void startDataNodes(Configuration conf, int numDataNodes,
+      boolean manageDfsDirs, StartupOption operation, 
+      String[] racks, String[] hosts,
+      long[] simulatedCapacities,
+      boolean setupHostsFile,
+      boolean checkDataNodeAddrConfig) throws IOException {
+    startDataNodes(conf, numDataNodes, manageDfsDirs, operation, racks, hosts,
+        simulatedCapacities, setupHostsFile, checkDataNodeAddrConfig, false);
   }
 
   /**
@@ -1004,19 +1033,25 @@ public class MiniDFSCluster {
    * @param simulatedCapacities array of capacities of the simulated data nodes
    * @param setupHostsFile add new nodes to dfs hosts files
    * @param checkDataNodeAddrConfig if true, only set DataNode port addresses if not already set in config
+   * @param checkDataNodeHostConfig if true, only set DataNode hostname key if not already set in config
    *
    * @throws IllegalStateException if NameNode has been shutdown
    */
   public synchronized void startDataNodes(Configuration conf, int numDataNodes,
-                             boolean manageDfsDirs, StartupOption operation, 
-                             String[] racks, String[] hosts,
-                             long[] simulatedCapacities,
-                             boolean setupHostsFile,
-                             boolean checkDataNodeAddrConfig) throws IOException {
+      boolean manageDfsDirs, StartupOption operation, 
+      String[] racks, String[] hosts,
+      long[] simulatedCapacities,
+      boolean setupHostsFile,
+      boolean checkDataNodeAddrConfig,
+      boolean checkDataNodeHostConfig) throws IOException {
     if (operation == StartupOption.RECOVER) {
       return;
     }
-    conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
+    if (checkDataNodeHostConfig) {
+      conf.setIfUnset(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
+    } else {
+      conf.set(DFS_DATANODE_HOST_NAME_KEY, "127.0.0.1");
+    }
 
     int curDatanodesNum = dataNodes.size();
     // for mincluster's the default initialDelay for BRs is 0

+ 65 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -41,6 +41,7 @@ import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Random;
 import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
@@ -79,8 +80,10 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.log4j.Level;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
 import org.mockito.internal.stubbing.answers.ThrowsException;
@@ -765,7 +768,7 @@ public class TestDFSClientRetries {
 
     try {
       proxy = DFSUtil.createClientDatanodeProtocolProxy(
-          fakeDnId, conf, 500, fakeBlock);
+          fakeDnId, conf, 500, false, fakeBlock);
 
       proxy.getReplicaVisibleLength(new ExtendedBlock("bpid", 1));
       fail ("Did not get expected exception: SocketTimeoutException");
@@ -842,6 +845,8 @@ public class TestDFSClientRetries {
     final Path dir = new Path("/testNamenodeRestart");
 
     conf.setBoolean(DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, true);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
+    conf.setInt(MiniDFSCluster.DFS_NAMENODE_SAFEMODE_EXTENSION_TESTING_KEY, 5000);
 
     final short numDatanodes = 3;
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
@@ -864,11 +869,38 @@ public class TestDFSClientRetries {
       final FileStatus s1 = fs.getFileStatus(file1);
       assertEquals(length, s1.getLen());
 
+      //create file4, write some data but not close
+      final Path file4 = new Path(dir, "file4"); 
+      final FSDataOutputStream out4 = fs.create(file4, false, 4096,
+          fs.getDefaultReplication(file4), 1024L, null);
+      final byte[] bytes = new byte[1000];
+      new Random().nextBytes(bytes);
+      out4.write(bytes);
+      out4.write(bytes);
+      out4.hflush();
+
       //shutdown namenode
       assertTrue(HdfsUtils.isHealthy(uri));
       cluster.shutdownNameNode(0);
       assertFalse(HdfsUtils.isHealthy(uri));
 
+      //namenode is down, continue writing file4 in a thread
+      final Thread file4thread = new Thread(new Runnable() {
+        @Override
+        public void run() {
+          try {
+            //write some more data and then close the file
+            out4.write(bytes);
+            out4.write(bytes);
+            out4.write(bytes);
+            out4.close();
+          } catch (Exception e) {
+            exceptions.add(e);
+          }
+        }
+      });
+      file4thread.start();
+
       //namenode is down, read the file in a thread
       final Thread reader = new Thread(new Runnable() {
         @Override
@@ -927,10 +959,26 @@ public class TestDFSClientRetries {
 
       //check file1 and file3
       thread.join();
+      assertEmpty(exceptions);
       assertEquals(s1.getLen(), fs.getFileStatus(file3).getLen());
       assertEquals(fs.getFileChecksum(file1), fs.getFileChecksum(file3));
 
       reader.join();
+      assertEmpty(exceptions);
+
+      //check file4
+      file4thread.join();
+      assertEmpty(exceptions);
+      {
+        final FSDataInputStream in = fs.open(file4);
+        int count = 0;
+        for(int r; (r = in.read()) != -1; count++) {
+          Assert.assertEquals(String.format("count=%d", count),
+              bytes[count % bytes.length], (byte)r);
+        }
+        Assert.assertEquals(5 * bytes.length, count);
+        in.close();
+      }
 
       //enter safe mode
       assertTrue(HdfsUtils.isHealthy(uri));
@@ -970,18 +1018,27 @@ public class TestDFSClientRetries {
         LOG.info("GOOD!", fnfe);
       }
 
-      if (!exceptions.isEmpty()) {
-        LOG.error("There are " + exceptions.size() + " exception(s):");
-        for(int i = 0; i < exceptions.size(); i++) {
-          LOG.error("Exception " + i, exceptions.get(i));
-        }
-        fail();
-      }
+      assertEmpty(exceptions);
     } finally {
       cluster.shutdown();
     }
   }
 
+  static void assertEmpty(final List<Exception> exceptions) {
+    if (!exceptions.isEmpty()) {
+      final StringBuilder b = new StringBuilder("There are ")
+        .append(exceptions.size())
+        .append(" exception(s):");
+      for(int i = 0; i < exceptions.size(); i++) {
+        b.append("\n  Exception ")
+         .append(i)
+         .append(": ")
+         .append(StringUtils.stringifyException(exceptions.get(i)));
+      }
+      fail(b.toString());
+    }
+  }
+
   private static FileSystem createFsWithDifferentUsername(
       final Configuration conf, final boolean isWebHDFS
       ) throws IOException, InterruptedException {

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -417,7 +417,6 @@ public class TestDistributedFileSystem {
 
     final Configuration conf = getTestConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
-    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
 
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem hdfs = cluster.getFileSystem();

+ 24 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -171,7 +171,14 @@ public class TestFileCreation {
 
   @Test
   public void testFileCreation() throws IOException {
-    checkFileCreation(null);
+    checkFileCreation(null, false);
+  }
+
+  /** Same test but the client should use DN hostnames */
+  @Test
+  public void testFileCreationUsingHostname() throws IOException {
+    assumeTrue(System.getProperty("os.name").startsWith("Linux"));
+    checkFileCreation(null, true);
   }
 
   /** Same test but the client should bind to a local interface */
@@ -180,10 +187,10 @@ public class TestFileCreation {
     assumeTrue(System.getProperty("os.name").startsWith("Linux"));
 
     // The mini cluster listens on the loopback so we can use it here
-    checkFileCreation("lo");
+    checkFileCreation("lo", false);
 
     try {
-      checkFileCreation("bogus-interface");
+      checkFileCreation("bogus-interface", false);
       fail("Able to specify a bogus interface");
     } catch (UnknownHostException e) {
       assertEquals("No such interface bogus-interface", e.getMessage());
@@ -193,16 +200,28 @@ public class TestFileCreation {
   /**
    * Test if file creation and disk space consumption works right
    * @param netIf the local interface, if any, clients should use to access DNs
+   * @param useDnHostname whether the client should contact DNs by hostname
    */
-  public void checkFileCreation(String netIf) throws IOException {
+  public void checkFileCreation(String netIf, boolean useDnHostname)
+      throws IOException {
     Configuration conf = new HdfsConfiguration();
     if (netIf != null) {
       conf.set(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES, netIf);
     }
+    conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, useDnHostname);
+    if (useDnHostname) {
+      // Since the mini cluster only listens on the loopback we have to
+      // ensure the hostname used to access DNs maps to the loopback. We
+      // do this by telling the DN to advertise localhost as its hostname
+      // instead of the default hostname.
+      conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
+    }
     if (simulatedStorage) {
       SimulatedFSDataset.setFactory(conf);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+      .checkDataNodeHostConfig(true)
+      .build();
     FileSystem fs = cluster.getFileSystem();
     try {
 

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java

@@ -92,7 +92,6 @@ public class TestHftpFileSystem {
     RAN.setSeed(seed);
 
     config = new Configuration();
-    config.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
     cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
     hdfs = cluster.getFileSystem();
     blockPoolId = cluster.getNamesystem().getBlockPoolId();

+ 23 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
 
@@ -41,6 +42,7 @@ public class TestMiniDFSCluster {
   private static final String CLUSTER_2 = "cluster2";
   private static final String CLUSTER_3 = "cluster3";
   private static final String CLUSTER_4 = "cluster4";
+  private static final String CLUSTER_5 = "cluster5";
   protected String testDataPath;
   protected File testDataDir;
   @Before
@@ -125,4 +127,25 @@ public class TestMiniDFSCluster {
       }  
     }
   }
+
+  /** MiniDFSCluster should not clobber dfs.datanode.hostname if requested */
+  @Test(timeout=100000)
+  public void testClusterSetDatanodeHostname() throws Throwable {
+    assumeTrue(System.getProperty("os.name").startsWith("Linux"));
+    Configuration conf = new HdfsConfiguration();
+    conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "MYHOST");
+    File testDataCluster5 = new File(testDataPath, CLUSTER_5);
+    String c5Path = testDataCluster5.getAbsolutePath();
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
+    MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
+      .numDataNodes(1)
+      .checkDataNodeHostConfig(true)
+      .build();
+    try {
+      assertEquals("DataNode hostname config not respected", "MYHOST",
+          cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
+    } finally {
+      MiniDFSCluster.shutdownCluster(cluster5);
+    }
+  }
 }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java

@@ -246,7 +246,7 @@ public class TestShortCircuitLocalRead {
             @Override
             public ClientDatanodeProtocol run() throws Exception {
               return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
-                  60000);
+                  60000, false);
             }
           });
       
@@ -264,7 +264,7 @@ public class TestShortCircuitLocalRead {
             @Override
             public ClientDatanodeProtocol run() throws Exception {
               return DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf,
-                  60000);
+                  60000, false);
             }
           });
       try {

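The new boolean selects hostname- versus IP-based addressing for the client-to-DN proxy. A minimal sketch of the hostname variant, assuming dnInfo and conf are the objects the surrounding test already holds and that the DN advertises a resolvable name:

// Sketch: same proxy as above, but addressed by hostname. The config key
// and the four-argument signature are from this commit; the setup line
// mirrors what the mini-cluster tests do to make the name resolvable.
conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
ClientDatanodeProtocol proxy =
    DFSUtil.createClientDatanodeProtocolProxy(dnInfo, conf, 60000, true);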
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java

@@ -304,7 +304,7 @@ public class TestBlockToken {
       long endTime = Time.now() + 3000;
       while (Time.now() < endTime) {
         proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
-            fakeBlock);
+            false, fakeBlock);
         assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
         if (proxy != null) {
           RPC.stopProxy(proxy);

+ 6 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java

@@ -105,10 +105,13 @@ public class DataNodeTestUtils {
   }
 
   public static InterDatanodeProtocol createInterDatanodeProtocolProxy(
-      DataNode dn, DatanodeID datanodeid, final Configuration conf
-      ) throws IOException {
+      DataNode dn, DatanodeID datanodeid, final Configuration conf,
+      boolean connectToDnViaHostname) throws IOException {
+    if (connectToDnViaHostname != dn.getDnConf().connectToDnViaHostname) {
+      throw new AssertionError("Unexpected DN hostname configuration");
+    }
     return DataNode.createInterDataNodeProtocolProxy(datanodeid, conf,
-        dn.getDnConf().socketTimeout);
+        dn.getDnConf().socketTimeout, dn.getDnConf().connectToDnViaHostname);
   }
   
   public static void shutdownBlockScanner(DataNode dn) {

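With the guard in place, a caller whose flag disagrees with the DataNode's own configuration fails fast instead of silently exercising the wrong code path. A minimal sketch of that failure mode; dn, dnId, and conf stand in for objects a test would already hold:

// Sketch: the DN was started with connectToDnViaHostname == false, so
// asking for a hostname-based proxy trips the new AssertionError guard.
try {
  DataNodeTestUtils.createInterDatanodeProtocolProxy(dn, dnId, conf, true);
  fail("expected the hostname-configuration guard to fire");
} catch (AssertionError expected) {
  // message: "Unexpected DN hostname configuration"
}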
+ 34 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java

@@ -29,6 +29,7 @@ import java.util.List;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClientAdapter;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -59,6 +60,8 @@ import org.apache.hadoop.net.NetUtils;
 import org.junit.Assert;
 import org.junit.Test;
 
+import static org.junit.Assume.assumeTrue;
+
 /**
  * This tests InterDataNodeProtocol for block handling. 
  */
@@ -125,17 +128,42 @@ public class TestInterDatanodeProtocol {
     return blocks.get(blocks.size() - 1);
   }
 
+  /** Test block metadata access via a DN */
+  @Test
+  public void testBlockMetaDataInfo() throws Exception {
+    checkBlockMetaDataInfo(false);
+  }
+
+  /** Same as above, but using hostnames for DN<->DN communication */
+  @Test
+  public void testBlockMetaDataInfoWithHostname() throws Exception {
+    assumeTrue(System.getProperty("os.name").startsWith("Linux"));
+    checkBlockMetaDataInfo(true);
+  }
+
   /**
    * The following test first creates a file.
    * It verifies the block information from a datanode.
-   * Then, it updates the block with new information and verifies again. 
+   * Then, it updates the block with new information and verifies again.
+   * @param useDnHostname whether DNs should connect to other DNs by hostname
    */
-  @Test
-  public void testBlockMetaDataInfo() throws Exception {
+  private void checkBlockMetaDataInfo(boolean useDnHostname) throws Exception {
     MiniDFSCluster cluster = null;
 
+    conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, useDnHostname);
+    if (useDnHostname) {
+      // Since the mini cluster only listens on the loopback, we have to
+      // ensure the hostname used to access DNs maps to the loopback. We
+      // do this by telling the DN to advertise localhost as its hostname
+      // instead of the default hostname.
+      conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
+    }
+
     try {
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3)
+        .checkDataNodeHostConfig(true)
+        .build();
       cluster.waitActive();
 
       //create a file
@@ -154,7 +182,7 @@ public class TestInterDatanodeProtocol {
       //connect to a data node
       DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
       InterDatanodeProtocol idp = DataNodeTestUtils.createInterDatanodeProtocolProxy(
-          datanode, datanodeinfo[0], conf);
+          datanode, datanodeinfo[0], conf, useDnHostname);
       
       //stop block scanner, so we could compare lastScanTime
       DataNodeTestUtils.shutdownBlockScanner(datanode);
@@ -364,7 +392,7 @@ public class TestInterDatanodeProtocol {
 
     try {
       proxy = DataNode.createInterDataNodeProtocolProxy(
-          dInfo, conf, 500);
+          dInfo, conf, 500, false);
       proxy.initReplicaRecovery(new RecoveringBlock(
           new ExtendedBlock("bpid", 1), null, 100));
       fail("Expected a SocketTimeoutException, but did not get one.");

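Taken together, the hunks above introduce parallel knobs for the client-to-DN and DN-to-DN paths. A minimal sketch of a configuration that switches both to hostnames; all three keys appear in this commit, while the single-host value is just the mini-cluster convention:

Configuration conf = new HdfsConfiguration();
// Clients contact DataNodes by hostname rather than IP.
conf.setBoolean(DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME, true);
// DataNodes contact each other by hostname rather than IP.
conf.setBoolean(DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME, true);
// What each DataNode advertises as its hostname.
conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");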
+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java

@@ -119,6 +119,11 @@ public class TestEditLog {
         "a4ff 0000 0000 0000 0000 0000 0000 0000"
     ).replace(" ",""));
 
+  static {
+    // No need to fsync for the purposes of tests. This makes
+    // the tests run much faster.
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+  }
   
   static final byte TRAILER_BYTE = FSEditLogOpCodes.OP_INVALID.getOpCode();
 

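The same static initializer recurs in several test classes below. One way to centralize it would be a shared base class; a minimal sketch, where the base-class name is hypothetical and only setShouldSkipFsyncForTesting(true) is taken from the commit:

// Hypothetical shared base class for edit-log tests. A static block in
// a superclass runs before any subclass initializes, preserving the
// before-any-NameNode ordering the per-class blocks rely on.
public abstract class EditLogTestBase {
  static {
    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
  }
}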
+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java

@@ -40,6 +40,12 @@ public class TestEditLogFileOutputStream {
   final static int MIN_PREALLOCATION_LENGTH =
       EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH;
 
+  static {
+    // No need to fsync for the purposes of tests. This makes
+    // the tests run much faster.
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+  }
+
   @Before
   @After
   public void deleteEditsFile() {

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java

@@ -51,6 +51,12 @@ import com.google.common.collect.ImmutableList;
 public class TestFileJournalManager {
   static final Log LOG = LogFactory.getLog(TestFileJournalManager.class);
 
+  static {
+    // No need to fsync for the purposes of tests. This makes
+    // the tests run much faster.
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+  }
+
   /**
    * Find out how many transactions we can read from a
    * FileJournalManager, starting at a given transaction ID.

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java

@@ -57,6 +57,7 @@ public class TestNameNodeRecovery {
 
   static {
     recoverStartOpt.setForce(MetaRecoveryContext.FORCE_ALL);
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
   }
 
   static void runEditLogTest(EditLogTestSetup elts) throws IOException {

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java

@@ -49,6 +49,12 @@ public class TestSecurityTokenEditLog {
   static final int NUM_THREADS = 100;
   static final int opsPerTrans = 3;
 
+  static {
+    // No need to fsync for the purposes of tests. This makes
+    // the tests run much faster.
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+  }
+
   //
   // an object that does a bunch of transactions
   //

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/HATestUtil.java

@@ -65,7 +65,7 @@ public abstract class HATestUtil {
    * @throws CouldNotCatchUpException if the standby doesn't catch up to the
    *         active in NN_LAG_TIMEOUT milliseconds
    */
-  static void waitForStandbyToCatchUp(NameNode active,
+  public static void waitForStandbyToCatchUp(NameNode active,
       NameNode standby) throws InterruptedException, IOException, CouldNotCatchUpException {
     
     long activeTxId = active.getNamesystem().getFSImage().getEditLog()

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogsDuringFailover.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -52,6 +53,12 @@ public class TestEditLogsDuringFailover {
   private static final Log LOG =
     LogFactory.getLog(TestEditLogsDuringFailover.class);
   private static final int NUM_DIRS_IN_LOG = 5;
+
+  static {
+    // No need to fsync for the purposes of tests. This makes
+    // the tests run much faster.
+    EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
+  }
   
   @Test
   public void testStartup() throws Exception {

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestInitializeSharedEdits.java

@@ -158,6 +158,13 @@ public class TestInitializeSharedEdits {
     assertCanStartHaNameNodes("2");
   }
   
+  @Test
+  public void testFailWhenNoSharedEditsSpecified() throws Exception {
+    Configuration confNoShared = new Configuration(conf);
+    confNoShared.unset(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
+    assertFalse(NameNode.initializeSharedEdits(confNoShared, true));
+  }
+  
   @Test
   public void testDontOverWriteExistingDir() {
     assertFalse(NameNode.initializeSharedEdits(conf, false));

+ 11 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java

@@ -55,7 +55,9 @@ public class TestDFSHAAdmin {
   
   private DFSHAAdmin tool;
   private ByteArrayOutputStream errOutBytes = new ByteArrayOutputStream();
+  private ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
   private String errOutput;
+  private String output;
   private HAServiceProtocol mockProtocol;
   private ZKFCProtocol mockZkfcProtocol;
   
@@ -111,12 +113,14 @@ public class TestDFSHAAdmin {
     };
     tool.setConf(getHAConf());
     tool.setErrOut(new PrintStream(errOutBytes));
+    tool.setOut(new PrintStream(outBytes));
   }
 
   private void assertOutputContains(String string) {
-    if (!errOutput.contains(string)) {
-      fail("Expected output to contain '" + string + "' but was:\n" +
-          errOutput);
+    if (!errOutput.contains(string) && !output.contains(string)) {
+      fail("Expected output to contain '" + string + 
+          "' but err_output was:\n" + errOutput + 
+          "\n and output was: \n" + output);
     }
   }
   
@@ -143,7 +147,7 @@ public class TestDFSHAAdmin {
 
   @Test
   public void testHelp() throws Exception {
-    assertEquals(-1, runTool("-help"));
+    assertEquals(0, runTool("-help"));
     assertEquals(0, runTool("-help", "transitionToActive"));
     assertOutputContains("Transitions the service into Active");
   }
@@ -378,10 +382,12 @@ public class TestDFSHAAdmin {
   
   private Object runTool(String ... args) throws Exception {
     errOutBytes.reset();
+    outBytes.reset();
     LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args));
     int ret = tool.run(args);
     errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
-    LOG.info("Output:\n" + errOutput);
+    output = new String(outBytes.toByteArray(), Charsets.UTF_8);
+    LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output);
     return ret;
   }
   

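The stream-capture idiom the test now applies to stdout as well as stderr is plain java.io. A self-contained sketch, with the println standing in for tool.run(args):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.nio.charset.StandardCharsets;

public class CaptureSketch {
  public static void main(String[] args) {
    // Point the tool's output at an in-memory stream, run it, decode.
    ByteArrayOutputStream outBytes = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(outBytes);
    out.println("Usage: ...");   // stand-in for the tool writing output
    out.flush();
    String output = new String(outBytes.toByteArray(), StandardCharsets.UTF_8);
    System.out.println("captured: " + output);
  }
}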
+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -809,6 +809,9 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-3782. teragen terasort jobs fail when using webhdfs:// (Jason
     Lowe via bobby)
 
+    MAPREDUCE-4053. Counters group name deprecation is wrong: when iterating
+    over group names, deprecated names don't show up (Robert Evans via tgraves)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 10 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java

@@ -24,6 +24,7 @@ import static org.apache.hadoop.mapreduce.counters.CounterGroupFactory.isFramewo
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.concurrent.ConcurrentSkipListMap;
@@ -185,7 +186,15 @@ public abstract class AbstractCounters<C extends Counter,
    * @return Set of counter names.
    */
   public synchronized Iterable<String> getGroupNames() {
-    return Iterables.concat(fgroups.keySet(), groups.keySet());
+    HashSet<String> deprecated = new HashSet<String>();
+    for(Map.Entry<String, String> entry : legacyMap.entrySet()) {
+      String newGroup = entry.getValue();
+      boolean isFGroup = isFrameworkGroup(newGroup);
+      if(isFGroup ? fgroups.containsKey(newGroup) : groups.containsKey(newGroup)) {
+        deprecated.add(entry.getKey());
+      }
+    }
+    return Iterables.concat(fgroups.keySet(), groups.keySet(), deprecated);
   }
 
   @Override

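The core of the fix is a reverse lookup from the legacy-name map into the live group maps, so a deprecated name is reported only when its replacement group actually exists. A self-contained sketch of the same logic with plain collections; the map contents are made up for illustration:

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class GroupNamesSketch {
  public static void main(String[] args) {
    // Live groups, keyed by their current names.
    Set<String> groups = new HashSet<String>(
        Arrays.asList("org.apache.hadoop.mapreduce.FileSystemCounter"));
    // legacy name -> current name, mirroring legacyMap above.
    Map<String, String> legacyMap = new HashMap<String, String>();
    legacyMap.put("FileSystemCounter",
        "org.apache.hadoop.mapreduce.FileSystemCounter");

    // Only report a deprecated name if its replacement group exists.
    Set<String> names = new HashSet<String>(groups);
    for (Map.Entry<String, String> e : legacyMap.entrySet()) {
      if (groups.contains(e.getValue())) {
        names.add(e.getKey());
      }
    }
    System.out.println(names);  // contains both the legacy and new name
  }
}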
+ 18 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java

@@ -22,6 +22,7 @@ import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
 import java.text.ParseException;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Random;
 
@@ -224,6 +225,23 @@ public class TestCounters {
     iterator.next();
   }
   
+  @Test
+  public void testLegacyGetGroupNames() {
+    Counters counters = new Counters();
+    // create 2 filesystem counter groups
+    counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
+    counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);
+    counters.incrCounter("group1", "counter1", 1);
+    
+    HashSet<String> groups = new HashSet<String>(counters.getGroupNames());
+    HashSet<String> expectedGroups = new HashSet<String>();
+    expectedGroups.add("group1");
+    expectedGroups.add("FileSystemCounter"); //Legacy Name
+    expectedGroups.add("org.apache.hadoop.mapreduce.FileSystemCounter");
+
+    assertEquals(expectedGroups, groups);
+  }
+  
   @Test
   public void testMakeCompactString() {
     final String GC1 = "group1.counter1:1";

+ 1 - 1
hadoop-project/pom.xml

@@ -458,7 +458,7 @@
       <dependency>
         <groupId>log4j</groupId>
         <artifactId>log4j</artifactId>
-        <version>1.2.15</version>
+        <version>1.2.17</version>
         <exclusions>
           <exclusion>
             <groupId>com.sun.jdmk</groupId>