
HADOOP-5094. Show hostname and separate live/dead datanodes in DFSAdmin report. (Jakob Homan via szetszwo)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@740532 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 16 years ago
parent
commit
9da114a33b

+ 3 - 0
CHANGES.txt

@@ -20,6 +20,9 @@ Trunk (unreleased changes)
     line per split- rather than moving back one character in the stream- to
     work with splittable compression codecs. (Abdul Qadeer via cdouglas)
 
+    HADOOP-5094. Show hostname and separate live/dead datanodes in DFSAdmin
+    report.  (Jakob Homan via szetszwo)
+
   NEW FEATURES
 
     HADOOP-4268. Change fsck to use ClientProtocol methods so that the

+ 36 - 2
src/hdfs/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -20,7 +20,10 @@ package org.apache.hadoop.hdfs.protocol;
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
 import java.util.Date;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
@@ -44,6 +47,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
   protected long lastUpdate;
   protected int xceiverCount;
   protected String location = NetworkTopology.DEFAULT_RACK;
+  static final Pattern ip = // Pattern for matching hostname to ip:port
+    Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:?\\d*");
 
   /** HostName as supplied by the datanode during registration as its 
    * name. Namenode uses datanode IP address as the name.
@@ -172,8 +177,13 @@ public class DatanodeInfo extends DatanodeID implements Node {
     long nonDFSUsed = getNonDfsUsed();
     float usedPercent = getDfsUsedPercent();
     float remainingPercent = getRemainingPercent();
+    String hostName = getHostNameOfIP();
+
+    buffer.append("Name: "+ name);
+    if(hostName != null)
+      buffer.append(" (" + hostName + ")");
+    buffer.append("\n");
 
-    buffer.append("Name: "+name+"\n");
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append("Rack: "+location+"\n");
     }
@@ -188,13 +198,37 @@ public class DatanodeInfo extends DatanodeID implements Node {
     buffer.append("Configured Capacity: "+c+" ("+StringUtils.byteDesc(c)+")"+"\n");
     buffer.append("DFS Used: "+u+" ("+StringUtils.byteDesc(u)+")"+"\n");
     buffer.append("Non DFS Used: "+nonDFSUsed+" ("+StringUtils.byteDesc(nonDFSUsed)+")"+"\n");
-    buffer.append("DFS Remaining: " +r+ "("+StringUtils.byteDesc(r)+")"+"\n");
+    buffer.append("DFS Remaining: " +r+ " ("+StringUtils.byteDesc(r)+")"+"\n");
     buffer.append("DFS Used%: "+StringUtils.limitDecimalTo2(usedPercent)+"%\n");
     buffer.append("DFS Remaining%: "+StringUtils.limitDecimalTo2(remainingPercent)+"%\n");
     buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
     return buffer.toString();
   }
 
+  /**
+   * Attempt to obtain the host name of a name specified by ip address.  
+   * Check that the node name is an ip addr and if so, attempt to determine
+   * its host name.  If the name is not an IP addr, or the actual name cannot
+   * be determined, return null.
+   * 
+   * @return Host name or null
+   */
+  private String getHostNameOfIP() {
+    // If name is not an ip addr, don't bother looking it up
+    if(!ip.matcher(name).matches())
+      return null;
+    
+    String hostname = "";
+    try {
+      String n = name.substring(0, name.indexOf(':'));
+      hostname = InetAddress.getByName(n).getHostName();
+    } catch (UnknownHostException e) {
+      return null;
+    }
+    
+    return hostname; 
+  }
+
   /** A formatted string for printing the status of the DataNode. */
   public String dumpDatanode() {
     StringBuffer buffer = new StringBuffer();
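
Note: the new getHostNameOfIP() helper performs a reverse DNS lookup only when
the node name matches the ip:port pattern above. A minimal standalone sketch of
the same idea (class and method names here are illustrative, not part of the
patch; the sketch also guards the no-port case, which the regex permits but
which substring(0, indexOf(':')) in the patch would reject with an exception):

    import java.net.InetAddress;
    import java.net.UnknownHostException;
    import java.util.regex.Pattern;

    public class HostNameLookupSketch {
      // Same pattern as the patch: dotted-quad IP with an optional :port suffix.
      static final Pattern IP =
          Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:?\\d*");

      static String hostNameOfIP(String name) {
        if (!IP.matcher(name).matches())
          return null;                       // not ip:port, skip the lookup
        int colon = name.indexOf(':');
        String addr = colon < 0 ? name : name.substring(0, colon);
        try {
          // getHostName() triggers the reverse lookup; if the lookup fails
          // it returns the literal address rather than throwing.
          return InetAddress.getByName(addr).getHostName();
        } catch (UnknownHostException e) {
          return null;                       // caller prints the IP alone
        }
      }

      public static void main(String[] args) {
        System.out.println(hostNameOfIP("127.0.0.1:50010"));          // e.g. localhost
        System.out.println(hostNameOfIP("host1.example.com:50010"));  // null
      }
    }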

+ 20 - 13
src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -23,6 +23,12 @@ import java.util.List;
 import javax.security.auth.login.LoginException;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsShell;
+import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.shell.Command;
+import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
@@ -30,12 +36,6 @@ import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsStatus;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.shell.Command;
-import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
@@ -298,14 +298,21 @@ public class DFSAdmin extends FsShell {
                          " (" + (live.length + dead.length) + " total, " + 
                          dead.length + " dead)\n");
       
-      for (DatanodeInfo dn : live) {
-        System.out.println(dn.getDatanodeReport());
-        System.out.println();
+      if(live.length > 0) {
+        System.out.println("Live datanodes:");
+        for (DatanodeInfo dn : live) {
+          System.out.println(dn.getDatanodeReport());
+          System.out.println();
+        }
+      }
+      
+      if(dead.length > 0) {
+        System.out.println("Dead datanodes:");
+        for (DatanodeInfo dn : dead) {
+          System.out.println(dn.getDatanodeReport());
+          System.out.println();
+        }     
       }
-      for (DatanodeInfo dn : dead) {
-        System.out.println(dn.getDatanodeReport());
-        System.out.println();
-      }      
     }
   }
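
With this change, dfsadmin -report groups the per-node reports under
"Live datanodes:" and "Dead datanodes:" headings, and each "Name:" line carries
the resolved hostname in parentheses when the reverse lookup succeeds.
Illustrative output (hostnames, sizes, and counts are made up for the example):

    $ bin/hadoop dfsadmin -report
    ...
    Datanodes available: 1 (2 total, 1 dead)

    Live datanodes:
    Name: 127.0.0.1:50010 (localhost)
    Configured Capacity: 120034123776 (111.79 GB)
    DFS Used: 24576 (24 KB)
    ...

    Dead datanodes:
    Name: 10.0.0.2:50010
    ...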