Kaynağa Gözat

HDFS-1080. SecondaryNameNode image transfer should use the defined http address rather than local ip address. (jghoman)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@953195 13f79535-47bb-0310-9956-ffa450edef68
Jakob Homan 15 yıl önce
ebeveyn
işleme
5d4ecb04f2

+ 3 - 0
CHANGES.txt

@@ -70,6 +70,9 @@ Trunk (unreleased changes)
 
 
     HDFS-1027. Update copyright year to 2010. (Ravi Phulari via jghoman)
 
+    HDFS-1080. SecondaryNameNode image transfer should use the defined http 
+    address rather than local ip address. (jghoman)
+
 Release 0.21.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 11 - 2
src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java

@@ -39,6 +39,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.util.Daemon;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT;
 
 /**
  * The Checkpointer is responsible for supporting periodic checkpoints 
@@ -60,6 +62,8 @@ class Checkpointer extends Daemon {
   private long checkpointPeriod;    // in seconds
   private long checkpointSize;    // size (in MB) of current Edit Log
 
+  private String infoBindAddress;
+
   private BackupStorage getFSImage() {
     return (BackupStorage)backupNode.getFSImage();
   }
@@ -94,6 +98,11 @@ class Checkpointer extends Daemon {
     checkpointSize = conf.getLong(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_KEY, 
                                   DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_SIZE_DEFAULT);
 
+    // Pull out exact http address for posting url to avoid ip aliasing issues
+    String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
+                                   DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
+    infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.indexOf(":"));
+    
     HttpServer httpServer = backupNode.httpServer;
     httpServer.setAttribute("name.system.image", getFSImage());
     httpServer.setAttribute("name.conf", conf);
@@ -197,11 +206,11 @@ class Checkpointer extends Daemon {
    * Copy the new image into remote name-node.
    */
   private void uploadCheckpoint(CheckpointSignature sig) throws IOException {
+    // Use the exact http addr as specified in config to deal with ip aliasing
     InetSocketAddress httpSocAddr = backupNode.getHttpAddress();
     int httpPort = httpSocAddr.getPort();
     String fileid = "putimage=1&port=" + httpPort +
-      "&machine=" +
-      InetAddress.getLocalHost().getHostAddress() +
+      "&machine=" + infoBindAddress +
       "&token=" + sig.toString();
     LOG.info("Posted URL " + backupNode.nnHttpAddress + fileid);
     TransferFsImage.getFileClient(backupNode.nnHttpAddress, fileid, (File[])null);

+ 1 - 3
src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 
 import java.io.File;
 import java.io.IOException;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.ArrayList;
@@ -277,8 +276,7 @@ public class SecondaryNameNode implements Runnable {
    */
   private void putFSImage(CheckpointSignature sig) throws IOException {
     String fileid = "putimage=1&port=" + infoPort +
-      "&machine=" +
-      InetAddress.getLocalHost().getHostAddress() +
+      "&machine=" + infoBindAddress + 
       "&token=" + sig.toString();
     LOG.info("Posted URL " + fsName + fileid);
     TransferFsImage.getFileClient(fsName, fileid, (File[])null);