
HDFS-3210. svn merge -c 1310135 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1310137 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 13 years ago
parent
commit
879c0ec2b0

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -322,6 +322,9 @@ Release 2.0.0 - UNRELEASED
     HDFS-3109. Remove hsqldf exclusions from pom.xml. (Ravi Prakash
     via suresh)

+    HDFS-3210. JsonUtil#toJsonMap for a DatanodeInfo should use
+    "ipAddr" instead of "name". (eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS

     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -122,12 +122,12 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }

   /** Constructor */
-  public DatanodeInfo(final String name, final String hostName,
+  public DatanodeInfo(final String ipAddr, final String hostName,
       final String storageID, final int xferPort, final int infoPort, final int ipcPort,
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
       final String networkLocation, final AdminStates adminState) {
-    super(name, hostName, storageID, xferPort, infoPort, ipcPort);
+    super(ipAddr, hostName, storageID, xferPort, infoPort, ipcPort);
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
     this.remaining = remaining;
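
The constructor change above is a pure parameter rename: the first argument still passes straight through to the DatanodeID superclass, so no caller or behavior changes. A compilable sketch of that pass-through, using stub classes (DatanodeIDStub and DatanodeInfoStub are illustrative names, not the real Hadoop classes):

// Stub standing in for DatanodeID; models only the ipAddr pass-through.
class DatanodeIDStub {
  final String ipAddr;
  DatanodeIDStub(String ipAddr, String hostName, String storageID,
      int xferPort, int infoPort, int ipcPort) {
    this.ipAddr = ipAddr;
  }
}

// Stub standing in for DatanodeInfo.
class DatanodeInfoStub extends DatanodeIDStub {
  DatanodeInfoStub(String ipAddr, String hostName, String storageID,
      int xferPort, int infoPort, int ipcPort) {
    // Renaming "name" to "ipAddr" is documentation-only: Java parameter
    // names are not part of the source or binary API contract.
    super(ipAddr, hostName, storageID, xferPort, infoPort, ipcPort);
  }
}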

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java

@@ -305,7 +305,7 @@ public class JsonUtil {
     }

     return new DatanodeInfo(
-        (String)m.get("name"),
+        (String)m.get("ipAddr"),
         (String)m.get("hostName"),
         (String)m.get("storageID"),
         (int)(long)(Long)m.get("xferPort"),
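
For context on the one-line JsonUtil fix above: the reader must use the same key that JsonUtil#toJsonMap (named in the changelog entry, but outside this diff) emits for a DatanodeInfo, or the field silently comes back null. A minimal standalone sketch of that read/write contract follows; it is not the actual Hadoop source, and the map layout and values are hypothetical.

import java.util.Map;
import java.util.TreeMap;

public class DatanodeInfoJsonSketch {
  // Write side (sketch): emits "ipAddr", the key the reader now expects.
  static Map<String, Object> toJsonMap(String ipAddr, String hostName,
      String storageID, long xferPort) {
    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("ipAddr", ipAddr);      // previously keyed as "name"
    m.put("hostName", hostName);
    m.put("storageID", storageID);
    m.put("xferPort", xferPort);  // JSON parsers hand numbers back as Long
    return m;
  }

  public static void main(String[] args) {
    Map<String, Object> m =
        toJsonMap("10.0.0.1", "dn1.example.com", "DS-1", 50010L);
    // Read side, mirroring the diff above; a mismatched key would yield null.
    String ipAddr = (String) m.get("ipAddr");
    int xferPort = (int) (long) (Long) m.get("xferPort");
    System.out.println(ipAddr + ":" + xferPort);  // prints 10.0.0.1:50010
  }
}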

+ 14 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java

@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.util.Arrays;
 import java.util.Map;

 import javax.servlet.http.HttpServletResponse;
@@ -133,8 +134,20 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
     final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
         new Path(f), 0L, 1L);
     assertEquals(expected.length, computed.length);
-    for(int i = 0; i < computed.length; i++) {
+    for (int i = 0; i < computed.length; i++) {
       assertEquals(expected[i].toString(), computed[i].toString());
+      // Check names
+      String names1[] = expected[i].getNames();
+      String names2[] = computed[i].getNames();
+      Arrays.sort(names1);
+      Arrays.sort(names2);
+      Assert.assertArrayEquals("Names differ", names1, names2);
+      // Check topology
+      String topos1[] = expected[i].getTopologyPaths();
+      String topos2[] = computed[i].getTopologyPaths();
+      Arrays.sort(topos1);
+      Arrays.sort(topos2);
+      Assert.assertArrayEquals("Topology differs", topos1, topos2);
     }
   }