
HDFS-12067. Correct dfsadmin commands usage message to reflect IPC port. Contributed by steven-wugang.

Brahma Reddy Battula, 8 years ago
commit f8cd55fe33

+34 −23
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -1113,29 +1113,39 @@ public class DFSAdmin extends FsShell {
         "\tor gets a list of reconfigurable properties.\n" +
         "\tor gets a list of reconfigurable properties.\n" +
 
 
         "\tThe second parameter specifies the node type\n";
         "\tThe second parameter specifies the node type\n";
-    String genericRefresh = "-refresh: Arguments are <hostname:port> <resource_identifier> [arg1..argn]\n" +
-      "\tTriggers a runtime-refresh of the resource specified by <resource_identifier>\n" +
-      "\ton <hostname:port>. All other args after are sent to the host.\n";
+    String genericRefresh = "-refresh: Arguments are <hostname:ipc_port>" +
+            " <resource_identifier> [arg1..argn]\n" +
+            "\tTriggers a runtime-refresh of the resource specified by " +
+            "<resource_identifier> on <hostname:ipc_port>.\n" +
+            "\tAll other args after are sent to the host.\n" +
+            "\tThe ipc_port is determined by 'dfs.datanode.ipc.address', " +
+            "default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
 
     String printTopology = "-printTopology: Print a tree of the racks and their\n" +
     String printTopology = "-printTopology: Print a tree of the racks and their\n" +
                            "\t\tnodes as reported by the Namenode\n";
                            "\t\tnodes as reported by the Namenode\n";
     
     
-    String refreshNamenodes = "-refreshNamenodes: Takes a datanodehost:port as argument,\n"+
-                              "\t\tFor the given datanode, reloads the configuration files,\n" +
-                              "\t\tstops serving the removed block-pools\n"+
-                              "\t\tand starts serving new block-pools\n";
+    String refreshNamenodes = "-refreshNamenodes: Takes a " +
+            "datanodehost:ipc_port as argument. For the given datanode,\n" +
+            "\t\treloads the configuration files, stops serving the removed\n" +
+            "\t\tblock-pools and starts serving new block-pools.\n" +
+            "\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address', " +
+            "default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
     
     
-    String getVolumeReport = "-getVolumeReport: Takes a datanodehost:port as "
-        + "argument,\n\t\tFor the given datanode, get the volume report\n";
-
-    String deleteBlockPool = "-deleteBlockPool: Arguments are datanodehost:port, blockpool id\n"+
-                             "\t\t and an optional argument \"force\". If force is passed,\n"+
-                             "\t\t block pool directory for the given blockpool id on the given\n"+
-                             "\t\t datanode is deleted along with its contents, otherwise\n"+
-                             "\t\t the directory is deleted only if it is empty. The command\n" +
-                             "\t\t will fail if datanode is still serving the block pool.\n" +
-                             "\t\t   Refer to refreshNamenodes to shutdown a block pool\n" +
-                             "\t\t service on a datanode.\n";
+    String getVolumeReport = "-getVolumeReport: Takes a datanodehost:ipc_port"+
+            " as argument. For the given datanode, get the volume report.\n" +
+            "\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address', " +
+            "default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
+
+    String deleteBlockPool = "-deleteBlockPool: Arguments are " +
+            "datanodehost:ipc_port, blockpool id and an optional argument\n" +
+            "\t\t\"force\". If force is passed, block pool directory for\n" +
+            "\t\tthe given blockpool id on the given datanode is deleted\n" +
+            "\t\talong with its contents, otherwise the directory is deleted\n" +
+            "\t\tonly if it is empty. The command will fail if datanode is\n" +
+            "\t\tstill serving the block pool. Refer to refreshNamenodes to\n" +
+            "\t\tshut down a block pool service on a datanode.\n" +
+            "\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address', " +
+            "default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
 
     String setBalancerBandwidth = "-setBalancerBandwidth <bandwidth>:\n" +
     String setBalancerBandwidth = "-setBalancerBandwidth <bandwidth>:\n" +
       "\tChanges the network bandwidth used by each datanode during\n" +
       "\tChanges the network bandwidth used by each datanode during\n" +
@@ -1893,23 +1903,24 @@ public class DFSAdmin extends FsShell {
                          + " [-refreshCallQueue]");
                          + " [-refreshCallQueue]");
     } else if ("-reconfig".equals(cmd)) {
     } else if ("-reconfig".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
       System.err.println("Usage: hdfs dfsadmin"
-          + " [-reconfig <namenode|datanode> <host:port> "
+          + " [-reconfig <namenode|datanode> <host:ipc_port> "
           + "<start|status|properties>]");
           + "<start|status|properties>]");
     } else if ("-refresh".equals(cmd)) {
     } else if ("-refresh".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
       System.err.println("Usage: hdfs dfsadmin"
-                         + " [-refresh <hostname:port> <resource_identifier> [arg1..argn]");
+          + " [-refresh <hostname:ipc_port> "
+          + "<resource_identifier> [arg1..argn]");
     } else if ("-printTopology".equals(cmd)) {
     } else if ("-printTopology".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
       System.err.println("Usage: hdfs dfsadmin"
                          + " [-printTopology]");
                          + " [-printTopology]");
     } else if ("-refreshNamenodes".equals(cmd)) {
     } else if ("-refreshNamenodes".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
       System.err.println("Usage: hdfs dfsadmin"
-                         + " [-refreshNamenodes datanode-host:port]");
+                         + " [-refreshNamenodes datanode-host:ipc_port]");
     } else if ("-getVolumeReport".equals(cmd)) {
     } else if ("-getVolumeReport".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
       System.err.println("Usage: hdfs dfsadmin"
-          + " [-getVolumeReport datanode-host:port]");
+          + " [-getVolumeReport datanode-host:ipc_port]");
     } else if ("-deleteBlockPool".equals(cmd)) {
     } else if ("-deleteBlockPool".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
       System.err.println("Usage: hdfs dfsadmin"
-          + " [-deleteBlockPool datanode-host:port blockpoolId [force]]");
+          + " [-deleteBlockPool datanode-host:ipc_port blockpoolId [force]]");
     } else if ("-setBalancerBandwidth".equals(cmd)) {
     } else if ("-setBalancerBandwidth".equals(cmd)) {
       System.err.println("Usage: hdfs dfsadmin"
       System.err.println("Usage: hdfs dfsadmin"
                   + " [-setBalancerBandwidth <bandwidth in bytes per second>]");
                   + " [-setBalancerBandwidth <bandwidth in bytes per second>]");