
HDFS-1704. Add a tool that lists namenodes, secondary namenodes and backup nodes from the configuration file. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1076506 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 14 years ago
Parent commit: f563bdd023
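
Once the bin/hdfs wiring below is in place, the tool is invoked as "hdfs getconf" with one of the options listed in GetConf.Command (-namenodes, -secondaryNameNodes, -backupNodes, -includeFile, -excludeFile). A minimal sketch of the equivalent programmatic invocation; it assumes nothing beyond the patch itself, where GetConf.main is public but the constructors are package-private:

// Sketch: equivalent of running "hdfs getconf -namenodes" from the shell.
// GetConf.main runs the tool via ToolRunner and exits with its status.
public class GetConfExample {
  public static void main(String[] args) throws Exception {
    org.apache.hadoop.hdfs.tools.GetConf.main(new String[] {"-namenodes"});
  }
}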

CHANGES.txt  +3 -0

@@ -162,6 +162,9 @@ Trunk (unreleased changes)
     HDFS-1706. Federation: TestFileAppend2, TestFileAppend3 and 
     TestBlockTokenWithDFS failing. (jitendra)
 
+    HDFS-1704. Federation: Add a tool that lists namenodes, secondary
+    namenodes and backup nodes from the configuration file. (suresh)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)

bin/hdfs  +3 -0

@@ -33,6 +33,7 @@ function print_usage(){
   echo "  jmxget               get JMX exported values from NameNode or DataNode."
   echo "  oiv                  apply the offline fsimage viewer to an fsimage"
   echo "  fetchdt              fetch a delegation token from the NameNode"
+  echo "  getconf              get config values from configuration"
   echo "						Use -help to see options"
   echo ""
   echo "Most commands print help when invoked w/o parameters."
@@ -91,6 +92,8 @@ elif [ "$COMMAND" = "oiv" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
 elif [ "$COMMAND" = "fetchdt" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+elif [ "$COMMAND" = "getconfig" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.GetConf
 else
   echo $COMMAND - invalid command
   print_usage

src/java/org/apache/hadoop/hdfs/DFSUtil.java  +115 -43

@@ -21,7 +21,6 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
-import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
@@ -30,12 +29,12 @@ import java.util.StringTokenizer;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
@@ -237,72 +236,146 @@ public class DFSUtil {
     return blkLocations;
   }
 
-  private static URI getDefaultNamenode(Configuration conf) {
-    return FileSystem.getDefaultUri(conf);
-  }
-  
   /**
    * Returns collection of nameservice Ids from the configuration.
-   * 
    * @param conf configuration
    * @return collection of nameservice Ids
    */
   public static Collection<String> getNameServiceIds(Configuration conf) {
     return conf.getStringCollection(DFS_FEDERATION_NAMESERVICES);
   }
+
+  /**
+   * Given a list of keys in order of preference, returns the value of the
+   * first key found in the configuration.
+   * @param defaultValue default value to return when no key is found
+   * @param keySuffix suffix to add to the key, if it is not null
+   * @param conf Configuration
+   * @param keys list of keys in the order of preference
+   * @return value of the first key found, or the default if none is found
+   */
+  private static String getConfValue(String defaultValue, String keySuffix,
+      Configuration conf, String... keys) {
+    String value = null;
+    for (String key : keys) {
+      if (keySuffix != null) {
+        key += "." + keySuffix;
+      }
+      value = conf.get(key);
+      if (value != null) {
+        break;
+      }
+    }
+    if (value == null) {
+      value = defaultValue;
+    }
+    return value;
+  }
   
   /**
-   * Returns the InetSocketAddresses of namenodes from the configuration.
-   * Note this is to be used by datanodes to get the list of namenode addresses
-   * to talk to.
-   * 
-   * If namenode address specifically configured for datanodes (using service
-   * ports) is found, it is returned. If not, regular RPC address configured 
-   * for other clients is returned.
-   * 
+   * Returns list of InetSocketAddress for a given set of keys.
    * @param conf configuration
-   * @return list of InetSocketAddresses
-   * @throws IOException on error
+   * @param defaultAddress default address to return in case key is not found
+   * @param keys Set of keys to look for in the order of preference
+   * @return list of InetSocketAddress corresponding to the key
    */
-  public static List<InetSocketAddress> getNNServiceRpcAddresses(
-      Configuration conf) throws IOException {
+  private static List<InetSocketAddress> getAddresses(Configuration conf,
+      String defaultAddress, String... keys) {
     Collection<String> nameserviceIds = getNameServiceIds(conf);
     List<InetSocketAddress> isas = new ArrayList<InetSocketAddress>();
 
     // Configuration with a single namenode
     if (nameserviceIds == null || nameserviceIds.isEmpty()) {
-      String namenodeAddress = conf
-          .get(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY);
-      if (namenodeAddress == null) {
-        // Fall back to regular rpc address
-        namenodeAddress = conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY);
-      }
-      if (namenodeAddress == null) {
-        isas.add(NameNode.getAddress(getDefaultNamenode(conf)));
-      } else {
-        isas.add(NameNode.getAddress(namenodeAddress));
+      String address = getConfValue(defaultAddress, null, conf, keys);
+      if (address == null) {
+        return null;
       }
+      isas.add(NetUtils.createSocketAddr(address));
     } else {
       // Get the namenodes for all the configured nameServiceIds
       for (String nameserviceId : nameserviceIds) {
-        String namenodeAddress = conf.get(getNameServiceIdKey(
-            DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId));
-        if (namenodeAddress == null) {
-          // Fallback to regular rpc address
-          namenodeAddress = conf.get(getNameServiceIdKey(
-              DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId));
-        }
-        if (namenodeAddress == null) {
-          throw new IOException("Incorrect configuration: "
-              + "No namenode rpc address configured for nameserviceId "
-              + nameserviceId);
+        String address = getConfValue(null, nameserviceId, conf, keys);
+        if (address == null) {
+          return null;
         }
-        isas.add(NameNode.getAddress(namenodeAddress));
+        isas.add(NetUtils.createSocketAddr(address));
       }
     }
     return isas;
   }
   
+  /**
+   * Returns list of InetSocketAddress corresponding to backup node RPC
+   * addresses from the configuration.
+   * 
+   * @param conf configuration
+   * @return list of InetSocketAddresses
+   * @throws IOException on error
+   */
+  public static List<InetSocketAddress> getBackupNodeAddresses(
+      Configuration conf) throws IOException {
+    List<InetSocketAddress> addressList = getAddresses(conf,
+        null, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
+    if (addressList == null) {
+      throw new IOException("Incorrect configuration: backup node address "
+          + DFS_NAMENODE_BACKUP_ADDRESS_KEY + " is not configured.");
+    }
+    return addressList;
+  }
+
+  /**
+   * Returns list of InetSocketAddresses corresponding to secondary namenode
+   * http addresses from the configuration.
+   * 
+   * @param conf configuration
+   * @return list of InetSocketAddresses
+   * @throws IOException on error
+   */
+  public static List<InetSocketAddress> getSecondaryNameNodeAddresses(
+      Configuration conf) throws IOException {
+    List<InetSocketAddress> addressList = getAddresses(conf, null,
+        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
+    if (addressList == null) {
+      throw new IOException("Incorrect configuration: secondary namenode address "
+          + DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + " is not configured.");
+    }
+    return addressList;
+  }
+
+  /**
+   * Returns list of InetSocketAddresses corresponding to namenodes from the
+   * configuration. Note this is to be used by datanodes to get the list of
+   * namenode addresses to talk to.
+   * 
+   * Returns namenode address specifically configured for datanodes (using
+   * service ports), if found. If not, regular RPC address configured for other
+   * clients is returned.
+   * 
+   * @param conf configuration
+   * @return list of InetSocketAddress
+   * @throws IOException on error
+   */
+  public static List<InetSocketAddress> getNNServiceRpcAddresses(
+      Configuration conf) throws IOException {
+    // Use default address as fall back
+    String defaultAddress;
+    try {
+      defaultAddress = NameNode.getHostPortString(NameNode.getAddress(conf));
+    } catch (IllegalArgumentException e) {
+      defaultAddress = null;
+    }
+    
+    List<InetSocketAddress> addressList = getAddresses(conf, defaultAddress,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
+    if (addressList == null) {
+      throw new IOException("Incorrect configuration: namenode address "
+          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "  
+          + DFS_NAMENODE_RPC_ADDRESS_KEY
+          + " is not configured.");
+    }
+    return addressList;
+  }
+  
   /**
    * @return key specific to a nameserviceId from a generic key
    */
@@ -366,5 +439,4 @@ public class DFSUtil {
     return new InetSocketAddress(address.substring(0, colon), 
         Integer.parseInt(address.substring(colon + 1)));
   }
-}
-
+}
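
The key lookup added to DFSUtil above, and the service-RPC-before-regular-RPC preference used by getNNServiceRpcAddresses, can be sketched in isolation. The literal key strings below are assumed to be the values behind DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY and DFS_NAMENODE_RPC_ADDRESS_KEY, and the nameservice id "ns1" is made up for illustration:

import org.apache.hadoop.conf.Configuration;

// Standalone rendering of what getConfValue does: try each key (with the
// nameservice suffix appended, when one is given) in order of preference,
// and fall back to the supplied default.
public class ConfLookupSketch {
  static String lookup(Configuration conf, String defaultValue,
      String suffix, String... keys) {
    for (String key : keys) {
      String k = (suffix != null) ? key + "." + suffix : key;
      String value = conf.get(k);
      if (value != null) {
        return value;
      }
    }
    return defaultValue;
  }

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("dfs.namenode.rpc-address.ns1", "nn1:8020");
    // The service RPC key is preferred but absent here, so the regular
    // RPC key for nameservice "ns1" wins: prints "nn1:8020".
    System.out.println(lookup(conf, null, "ns1",
        "dfs.namenode.servicerpc-address", "dfs.namenode.rpc-address"));
  }
}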

src/java/org/apache/hadoop/hdfs/tools/GetConf.java  +249 -0

@@ -0,0 +1,249 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.InetSocketAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.Tool;
+import org.apache.hadoop.util.ToolRunner;
+
+/**
+ * Tool for getting configuration information from a configuration file.
+ * 
+ * Adding more options:
+ * <ul>
+ * <li>
+ * If adding a simple option to get a value corresponding to a key in the 
+ * configuration, use regular {@link GetConf.CommandHandler}. 
+ * See {@link GetConf.Command#EXCLUDE_FILE} for an example.
+ * </li>
+ * <li>
+ * If adding an option that does not return a value for a key, add
+ * a subclass of {@link GetConf.CommandHandler} and set it up in
+ * {@link GetConf.Command}.
+ * See {@link GetConf.Command#NAMENODE} for an example.
+ * </li>
+ * </ul>
+ */
+public class GetConf extends Configured implements Tool {
+  private static final String DESCRIPTION = "hdfs getconf is a utility for "
+      + "getting configuration information from the config file.\n";
+
+  enum Command {
+    NAMENODE("-namenodes", new NameNodesCommandHandler(),
+        "gets list of namenodes in the cluster."),
+    SECONDARY("-secondaryNameNodes", new SecondaryNameNodesCommandHandler(),
+        "gets list of secondary namenodes in the cluster."),
+    BACKUP("-backupNodes", new BackupNodesCommandHandler(),
+        "gets list of backup nodes in the cluster."),
+    INCLUDE_FILE("-includeFile", new CommandHandler("dfs.hosts"),
+        "gets the include file path that defines the datanodes " +
+        "that can join the cluster."),
+    EXCLUDE_FILE("-excludeFile", new CommandHandler("dfs.hosts.exlucde"),
+        "gets the exclude file path that defines the datanodes " +
+        "that need to decommissioned.");
+
+    private final String cmd;
+    private final CommandHandler handler;
+    private final String description;
+
+    Command(String cmd, CommandHandler handler, String description) {
+      this.cmd = cmd;
+      this.handler = handler;
+      this.description = description;
+    }
+
+    public String getName() {
+      return cmd;
+    }
+    
+    public String getDescription() {
+      return description;
+    }
+    
+    public static CommandHandler getHandler(String name) {
+      for (Command cmd : values()) {
+        if (cmd.getName().equalsIgnoreCase(name)) {
+          return cmd.handler;
+        }
+      }
+      return null;
+    }
+  }
+  
+  static final String USAGE;
+  static {
+    Configuration.addDefaultResource("hdfs-default.xml");
+    Configuration.addDefaultResource("hdfs-site.xml");
+    
+    /* Initialize USAGE based on Command values */
+    StringBuilder usage = new StringBuilder(DESCRIPTION);
+    usage.append("\nhadoop getconf \n");
+    for (Command cmd : Command.values()) {
+      usage.append("\t[" + cmd.getName() + "]\t\t\t" + cmd.getDescription()
+          + "\n");
+    }
+    USAGE = usage.toString();
+  }
+  
+  /** 
+   * Handler to return value for key corresponding to the {@link Command}
+   */
+  static class CommandHandler {
+    final String key; // Configuration key to lookup
+    
+    CommandHandler() {
+      this(null);
+    }
+    
+    CommandHandler(String key) {
+      this.key = key;
+    }
+
+    final int doWork(GetConf tool) {
+      try {
+        return doWorkInternal(tool);
+      } catch (Exception e) {
+        tool.printError(e.getMessage());
+      }
+      return -1;
+    }
+    
+    /** Method to be overridden by subclasses for specific behavior */
+    int doWorkInternal(GetConf tool) throws Exception {
+      String value = tool.getConf().get(key);
+      if (value != null) {
+        tool.printOut(value);
+        return 0;
+      }
+      tool.printError("Configuration " + key + " is missing.");
+      return -1;
+    }
+  }
+  
+  /**
+   * Handler for {@link Command#NAMENODE}
+   */
+  static class NameNodesCommandHandler extends CommandHandler {
+    @Override
+    int doWorkInternal(GetConf tool) throws IOException {
+      tool.printList(DFSUtil.getNNServiceRpcAddresses(tool.getConf()));
+      return 0;
+    }
+  }
+  
+  /**
+   * Handler for {@link Command#BACKUP}
+   */
+  static class BackupNodesCommandHandler extends CommandHandler {
+    @Override
+    public int doWorkInternal(GetConf tool) throws IOException {
+      tool.printList(DFSUtil.getBackupNodeAddresses(tool.getConf()));
+      return 0;
+    }
+  }
+  
+  /**
+   * Handler for {@link Command#SECONDARY}
+   */
+  static class SecondaryNameNodesCommandHandler extends CommandHandler {
+    @Override
+    public int doWorkInternal(GetConf tool) throws IOException {
+      tool.printList(DFSUtil.getSecondaryNameNodeAddresses(tool.getConf()));
+      return 0;
+    }
+  }
+  
+  private final PrintStream out; // Stream for printing command output
+  private final PrintStream err; // Stream for printing error
+
+  GetConf(Configuration conf) {
+    this(conf, System.out, System.err);
+  }
+
+  GetConf(Configuration conf, PrintStream out, PrintStream err) {
+    super(conf);
+    this.out = out;
+    this.err = err;
+  }
+
+  void printError(String message) {
+    err.println(message);
+  }
+
+  void printOut(String message) {
+    out.println(message);
+  }
+
+  void printList(List<InetSocketAddress> list) {
+    StringBuilder buffer = new StringBuilder();
+    for (InetSocketAddress address : list) {
+      buffer.append(address.getHostName()).append(" ");
+    }
+    printOut(buffer.toString());
+  }
+
+  private void printUsage() {
+    printError(USAGE);
+  }
+
+  /**
+   * Main method that runs the tool for given arguments.
+   * @param args arguments
+   * @return return status of the command
+   */
+  private int doWork(String[] args) {
+    if (args.length == 1) {
+      CommandHandler handler = Command.getHandler(args[0]);
+      if (handler != null) {
+        return handler.doWork(this);
+      }
+    }
+    printUsage();
+    return -1;
+  }
+
+  @Override
+  public int run(final String[] args) throws Exception {
+    try {
+      return UserGroupInformation.getCurrentUser().doAs(
+          new PrivilegedExceptionAction<Integer>() {
+            @Override
+            public Integer run() throws Exception {
+              return doWork(args);
+            }
+          });
+    } catch (InterruptedException e) {
+      throw new IOException(e);
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    int res = ToolRunner.run(new GetConf(new HdfsConfiguration()), args);
+    System.exit(res);
+  }
+}
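
The "Adding more options" note in the class Javadoc is easiest to see in condensed form. Below is a self-contained sketch of the enum-plus-handler dispatch GetConf uses; the names Cmd, Handler and DispatchSketch are invented for illustration:

// Each enum constant carries its handler; lookup is a case-insensitive
// match on the flag name, exactly as in GetConf.Command.getHandler.
public class DispatchSketch {
  interface Handler {
    int doWork();
  }

  enum Cmd {
    INCLUDE_FILE("-includeFile", () -> { System.out.println("dfs.hosts"); return 0; }),
    EXCLUDE_FILE("-excludeFile", () -> { System.out.println("dfs.hosts.exclude"); return 0; });

    final String flag;
    final Handler handler;

    Cmd(String flag, Handler handler) {
      this.flag = flag;
      this.handler = handler;
    }

    static Handler getHandler(String name) {
      for (Cmd c : values()) {
        if (c.flag.equalsIgnoreCase(name)) {
          return c.handler;
        }
      }
      return null;
    }
  }

  public static void main(String[] args) {
    // A simple value-returning option is just one more enum constant; an
    // option with custom behavior supplies its own Handler, per the Javadoc.
    Handler h = Cmd.getHandler("-excludeFile");
    System.exit(h == null ? -1 : h.doWork());
  }
}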

src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java  +30 -0

@@ -27,6 +27,7 @@ import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
+import org.junit.Assert;
 import org.junit.Test;
 
 
@@ -115,4 +116,33 @@ public class TestDFSUtil {
         .get(DFSConfigKeys.FS_DEFAULT_NAME_KEY));
     cluster.shutdown();
   }
+  
+  /**
+   * Tests that, for an empty configuration, an exception is thrown by
+   * {@link DFSUtil#getNNServiceRpcAddresses(Configuration)},
+   * {@link DFSUtil#getBackupNodeAddresses(Configuration)} and
+   * {@link DFSUtil#getSecondaryNameNodeAddresses(Configuration)}
+   */
+  @Test
+  public void testEmptyConf() {
+    HdfsConfiguration conf = new HdfsConfiguration(false);
+    try {
+      DFSUtil.getNNServiceRpcAddresses(conf);
+      Assert.fail("Expected IOException is not thrown");
+    } catch (IOException expected) {
+    }
+
+    try {
+      DFSUtil.getBackupNodeAddresses(conf);
+      Assert.fail("Expected IOException is not thrown");
+    } catch (IOException expected) {
+    }
+
+    try {
+      DFSUtil.getSecondaryNameNodeAddresses(conf);
+      Assert.fail("Expected IOException is not thrown");
+    } catch (IOException expected) {
+    }
+  }
+
 }

src/test/hdfs/org/apache/hadoop/hdfs/tools/TestGetConf.java  +312 -0

@@ -0,0 +1,312 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.StringTokenizer;
+
+import static org.junit.Assert.*;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.tools.GetConf;
+import org.apache.hadoop.hdfs.tools.GetConf.Command;
+import org.apache.hadoop.hdfs.tools.GetConf.CommandHandler;
+import org.apache.hadoop.util.ToolRunner;
+import org.junit.Test;
+
+/**
+ * Test for {@link GetConf}
+ */
+public class TestGetConf {
+  enum TestType {
+    NAMENODE, BACKUP, SECONDARY
+  }
+  
+  /** Setup federation nameServiceIds in the configuration */
+  private void setupNameServices(HdfsConfiguration conf, int nameServiceIdCount) {
+    StringBuilder nsList = new StringBuilder();
+    for (int i = 0; i < nameServiceIdCount; i++) {
+      if (nsList.length() > 0) {
+        nsList.append(",");
+      }
+      nsList.append(getNameServiceId(i));
+    }
+    conf.set(DFS_FEDERATION_NAMESERVICES, nsList.toString());
+  }
+
+  /** Set a given key, with an address as its value, for all the nameServiceIds.
+   * @param conf configuration to set the addresses in
+   * @param key configuration key
+   * @param nameServiceIdCount Number of nameServices for which the key is set
+   * @param portOffset starting port offset
+   * @return list of addresses that are set in the configuration
+   */
+  private String[] setupAddress(HdfsConfiguration conf, String key,
+      int nameServiceIdCount, int portOffset) {
+    String[] values = new String[nameServiceIdCount];
+    for (int i = 0; i < nameServiceIdCount; i++, portOffset++) {
+      String nsID = getNameServiceId(i);
+      String specificKey = DFSUtil.getNameServiceIdKey(key, nsID);
+      values[i] = "nn" + i + ":" + portOffset;
+      conf.set(specificKey, values[i]);
+    }
+    return values;
+  }
+
+  /*
+   * Convert list of InetSocketAddress to string array with each address
+   * represented as "host:port"
+   */
+  private String[] toStringArray(List<InetSocketAddress> list) {
+    String[] ret = new String[list.size()];
+    for (int i = 0; i < list.size(); i++) {
+      ret[i] = NameNode.getHostPortString(list.get(i));
+    }
+    return ret;
+  }
+
+  /**
+   * Using DFSUtil methods, get the list of addresses of the given {@code type}
+   */
+  private List<InetSocketAddress> getAddressListFromConf(TestType type,
+      HdfsConfiguration conf) throws IOException {
+    switch (type) {
+    case NAMENODE:
+      return DFSUtil.getNNServiceRpcAddresses(conf);
+    case BACKUP:
+      return DFSUtil.getBackupNodeAddresses(conf);
+    case SECONDARY:
+      return DFSUtil.getSecondaryNameNodeAddresses(conf);
+    }
+    return null;
+  }
+  
+  private String runTool(HdfsConfiguration conf, String[] args, boolean success)
+      throws Exception {
+    ByteArrayOutputStream o = new ByteArrayOutputStream();
+    PrintStream out = new PrintStream(o, true);
+    try {
+      int ret = ToolRunner.run(new GetConf(conf, out, out), args);
+      assertEquals(success, ret == 0);
+      return o.toString();
+    } finally {
+      o.close();
+      out.close();
+    }
+  }
+  
+  /**
+   * Get address list for a given type of address. Command expected to
+   * fail if {@code success} is false.
+   * @return the success or error output from the tool.
+   */
+  private String getAddressListFromTool(TestType type, HdfsConfiguration conf,
+      boolean success)
+      throws Exception {
+    String[] args = new String[1];
+    switch (type) {
+    case NAMENODE:
+      args[0] = Command.NAMENODE.getName();
+      break;
+    case BACKUP:
+      args[0] = Command.BACKUP.getName();
+      break;
+    case SECONDARY:
+      args[0] = Command.SECONDARY.getName();
+      break;
+    }
+    return runTool(conf, args, success);
+  }
+
+  /**
+   * Using {@link GetConf} methods, get the list of addresses of the given
+   * {@code type}
+   */
+  private void getAddressListFromTool(TestType type, HdfsConfiguration conf,
+      List<InetSocketAddress> expected) throws Exception {
+    String out = getAddressListFromTool(type, conf, expected.size() != 0);
+    List<String> values = new ArrayList<String>();
+    
+    // Convert list of addresses returned to an array of string
+    StringTokenizer tokenizer = new StringTokenizer(out);
+    while (tokenizer.hasMoreTokens()) {
+      String s = tokenizer.nextToken().trim();
+      values.add(s);
+    }
+    String[] actual = values.toArray(new String[values.size()]);
+
+    // Convert expected list to String[] of hosts
+    int i = 0;
+    String[] expectedHosts = new String[expected.size()];
+    for (InetSocketAddress addr : expected) {
+      expectedHosts[i++] = addr.getHostName();
+    }
+
+    // Compare two arrays
+    assertTrue(Arrays.equals(expectedHosts, actual));
+  }
+  
+  private void verifyAddresses(HdfsConfiguration conf, TestType type,
+      String... expected) throws Exception {
+    // Ensure DFSUtil returned the right set of addresses
+    List<InetSocketAddress> list = getAddressListFromConf(type, conf);
+    String[] actual = toStringArray(list);
+    Arrays.sort(actual);
+    Arrays.sort(expected);
+    assertTrue(Arrays.equals(expected, actual));
+
+    // Test GetConf returned addresses
+    getAddressListFromTool(type, conf, list);
+  }
+
+  private static String getNameServiceId(int index) {
+    return "ns" + index;
+  }
+
+  /**
+   * Test empty configuration
+   */
+  @Test
+  public void testEmptyConf() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration(false);
+    // Verify getting addresses fails
+    getAddressListFromTool(TestType.NAMENODE, conf, false);
+    getAddressListFromTool(TestType.BACKUP, conf, false);
+    getAddressListFromTool(TestType.SECONDARY, conf, false);
+    for (Command cmd : Command.values()) {
+      CommandHandler handler = Command.getHandler(cmd.getName());
+      if (handler.key != null) {
+        // First test with configuration missing the required key
+        String[] args = {handler.key};
+        runTool(conf, args, false);
+      }
+    }
+  }
+  
+  /**
+   * Test invalid argument to the tool
+   */
+  @Test
+  public void testInvalidArgument() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration();
+    String[] args = {"-invalidArgument"};
+    String ret = runTool(conf, args, false);
+    assertTrue(ret.contains(GetConf.USAGE));
+  }
+
+  /**
+   * Tests to make sure the returned addresses are correct in case of default
+   * configuration with no federation
+   */
+  @Test
+  public void testNonFederation() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration(false);
+  
+    // Returned namenode address should match default address
+    conf.set(FS_DEFAULT_NAME_KEY, "hdfs://localhost:1000");
+    verifyAddresses(conf, TestType.NAMENODE, "localhost:1000");
+  
+    // Returned address should match backupnode RPC address
+    conf.set(DFS_NAMENODE_BACKUP_ADDRESS_KEY, "localhost:1001");
+    verifyAddresses(conf, TestType.BACKUP, "localhost:1001");
+  
+    // Returned address should match secondary http address
+    conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "localhost:1002");
+    verifyAddresses(conf, TestType.SECONDARY, "localhost:1002");
+  
+    // Returned namenode address should match service RPC address
+    conf = new HdfsConfiguration();
+    conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "localhost:1000");
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
+    verifyAddresses(conf, TestType.NAMENODE, "localhost:1000");
+  
+    // Returned address should match RPC address
+    conf = new HdfsConfiguration();
+    conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
+    verifyAddresses(conf, TestType.NAMENODE, "localhost:1001");
+  }
+
+  /**
+   * Tests to make sure the returned addresses are correct in case of a
+   * federation setup.
+   */
+  @Test
+  public void testFederation() throws Exception {
+    final int nsCount = 10;
+    HdfsConfiguration conf = new HdfsConfiguration(false);
+  
+    // Test to ensure namenode, backup and secondary namenode addresses are
+    // returned from federation configuration. Returned namenode addresses are
+    // based on service RPC address and not regular RPC address
+    setupNameServices(conf, nsCount);
+    String[] nnAddresses = setupAddress(conf,
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsCount, 1000);
+    setupAddress(conf, DFS_NAMENODE_RPC_ADDRESS_KEY, nsCount, 1500);
+    String[] backupAddresses = setupAddress(conf,
+        DFS_NAMENODE_BACKUP_ADDRESS_KEY, nsCount, 2000);
+    String[] secondaryAddresses = setupAddress(conf,
+        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, nsCount, 3000);
+    verifyAddresses(conf, TestType.NAMENODE, nnAddresses);
+    verifyAddresses(conf, TestType.BACKUP, backupAddresses);
+    verifyAddresses(conf, TestType.SECONDARY, secondaryAddresses);
+  
+    // Test to ensure namenode, backup and secondary namenode addresses are
+    // returned from federation configuration. Returned namenode addresses are
+    // based on regular RPC address in the absence of service RPC address
+    conf = new HdfsConfiguration(false);
+    setupNameServices(conf, nsCount);
+    nnAddresses = setupAddress(conf,
+        DFS_NAMENODE_RPC_ADDRESS_KEY, nsCount, 1000);
+    backupAddresses = setupAddress(conf,
+        DFS_NAMENODE_BACKUP_ADDRESS_KEY, nsCount, 2000);
+    secondaryAddresses = setupAddress(conf,
+        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, nsCount, 3000);
+    verifyAddresses(conf, TestType.NAMENODE, nnAddresses);
+    verifyAddresses(conf, TestType.BACKUP, backupAddresses);
+    verifyAddresses(conf, TestType.SECONDARY, secondaryAddresses);
+  }
+
+  /**
+   * Tests commands other than {@link Command#NAMENODE}, {@link Command#BACKUP}
+   * and {@link Command#SECONDARY}
+   */
+  @Test
+  public void testTool() throws Exception {
+    HdfsConfiguration conf = new HdfsConfiguration(false);
+    for (Command cmd : Command.values()) {
+      CommandHandler handler = Command.getHandler(cmd.getName());
+      if (handler.key != null) {
+        // Add the key to the conf and ensure tool returns the right value
+        String[] args = {handler.key};
+        conf.set(handler.key, "value");
+        assertTrue(runTool(conf, args, true).contains("value"));
+      }
+    }
+  }
+}
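
The federation cases in testFederation reduce to setting the nameservice list plus per-nameservice suffixed keys. A minimal sketch of that setup, assuming the literal key strings "dfs.federation.nameservices" and "dfs.namenode.rpc-address" match DFSConfigKeys, with made-up host names:

import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Two nameservices with only regular RPC keys set: DFSUtil falls back from
// the (absent) service RPC key to the regular RPC key and returns one
// address per nameservice, mirroring the second half of testFederation.
public class FederationConfSketch {
  public static void main(String[] args) throws Exception {
    HdfsConfiguration conf = new HdfsConfiguration(false);
    conf.set("dfs.federation.nameservices", "ns0,ns1");
    conf.set("dfs.namenode.rpc-address.ns0", "nn0:1000");
    conf.set("dfs.namenode.rpc-address.ns1", "nn1:1001");
    System.out.println(DFSUtil.getNNServiceRpcAddresses(conf));
  }
}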