
Merge trunk into auto-failover branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-3042@1308260 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon, 13 years ago
commit 2fd05aa597
100 changed files with 1044 additions and 1534 deletions
  1. 15 0
      hadoop-common-project/hadoop-common/CHANGES.txt
  2. 2 2
      hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
  3. 8 3
      hadoop-common-project/hadoop-common/src/main/bin/start-all.sh
  4. 8 3
      hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh
  5. 54 31
      hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
  6. 16 0
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  7. 37 14
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
  8. 21 4
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
  9. 2 2
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
  10. 13 15
      hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
  11. 3 3
      hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
  12. 54 31
      hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
  13. 0 2
      hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm
  14. 67 20
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java
  15. 12 0
      hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
  16. 2 2
      hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh
  17. 32 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  18. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
  19. 7 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh
  20. 7 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh
  21. 0 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml
  22. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
  23. 7 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  24. 0 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
  25. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
  26. 8 7
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  27. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  28. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  29. 0 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
  30. 112 64
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
  31. 18 38
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
  32. 7 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java
  33. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java
  34. 2 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
  35. 11 12
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
  36. 8 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  37. 32 32
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  38. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java
  39. 2 32
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
  40. 66 79
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
  41. 15 50
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
  42. 4 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java
  43. 1 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
  44. 3 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
  45. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
  46. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
  47. 91 98
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  48. 23 67
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
  49. 6 6
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  50. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
  51. 12 18
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
  52. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
  53. 4 37
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
  54. 11 129
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
  55. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java
  56. 27 100
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
  57. 3 59
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
  58. 2 17
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  59. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
  60. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
  61. 7 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  62. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
  63. 8 8
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
  64. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
  65. 21 9
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
  66. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java
  67. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java
  68. 5 4
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
  69. 6 5
      hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto
  70. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
  71. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
  72. 10 13
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
  73. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java
  74. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
  75. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java
  76. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  77. 0 13
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
  78. 3 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
  79. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
  80. 2 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java
  81. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java
  82. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
  83. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
  84. 4 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java
  85. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java
  86. 17 17
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
  87. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
  88. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
  89. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
  90. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
  91. 19 32
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
  92. 4 4
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
  93. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
  94. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
  95. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
  96. 0 267
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
  97. 1 6
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java
  98. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
  99. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java
  100. 9 9
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java

+ 15 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -231,10 +231,20 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8216. Address log4j.properties inconsistencies btw main and
     template dirs. (Patrick Hunt via eli)
 
+    HADOOP-8149. Cap space usage of default log4j rolling policy.
+    (Patrick Hunt via eli)
+
+    HADOOP-8211. Update commons-net version to 3.1. (eli)
+
+    HADOOP-8236. haadmin should have configurable timeouts for failover
+    commands. (todd)
+
   OPTIMIZATIONS
 
   BUG FIXES
 
+    HADOOP-8199. Fix issues in start-all.sh and stop-all.sh (Devaraj K via umamahesh)
+    
     HADOOP-7635. RetryInvocationHandler should release underlying resources on
     close. (atm)
 
 
@@ -295,6 +305,9 @@ Release 2.0.0 - UNRELEASED
     HADOOP-8218. RPC.closeProxy shouldn't throw error when closing a mock
     (todd)
 
+    HADOOP-8238. NetUtils#getHostNameOfIP blows up if given ip:port
+    string w/o port. (eli)
+
   BREAKDOWN OF HADOOP-7454 SUBTASKS
 
     HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
@@ -445,6 +458,8 @@ Release 0.23.2 - UNRELEASED
     HADOOP-8088. User-group mapping cache incorrectly does negative caching on
     transient failures (Khiwal Lee via bobby)
 
+    HADOOP-8208. Disallow self failover. (eli)
+
 Release 0.23.1 - 2012-02-17 
 
   INCOMPATIBLE CHANGES

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh

@@ -107,8 +107,8 @@ fi
 
 
 # some variables
 export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
-export HADOOP_ROOT_LOGGER="INFO,DRFA"
-export HADOOP_SECURITY_LOGGER="INFO,DRFAS"
+export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
+export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
 log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
 pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
 
 

+ 8 - 3
hadoop-common-project/hadoop-common/src/main/bin/start-all.sh

@@ -18,7 +18,7 @@
 
 
 # Start all hadoop daemons.  Run this on master node.
 
-echo "This script is Deprecated. Instead use start-dfs.sh and start-mapred.sh"
+echo "This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh"
 
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
@@ -28,6 +28,11 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
 . $HADOOP_LIBEXEC_DIR/hadoop-config.sh
 
 # start hdfs daemons if hdfs is present
-if [ -f "${HADOOP_HDFS_HOME}"/bin/start-dfs.sh ]; then
-  "${HADOOP_HDFS_HOME}"/bin/start-dfs.sh --config $HADOOP_CONF_DIR
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh ]; then
+  "${HADOOP_HDFS_HOME}"/sbin/start-dfs.sh --config $HADOOP_CONF_DIR
+fi
+
+# start yarn daemons if yarn is present
+if [ -f "${YARN_HOME}"/sbin/start-dfs.sh ]; then
+  "${YARN_HOME}"/sbin/start-yarn.sh --config $HADOOP_CONF_DIR
 fi

+ 8 - 3
hadoop-common-project/hadoop-common/src/main/bin/stop-all.sh

@@ -18,7 +18,7 @@
 
 
 # Stop all hadoop daemons.  Run this on master node.
 
-echo "This script is Deprecated. Instead use stop-dfs.sh and stop-mapred.sh"
+echo "This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh"
 
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
@@ -28,6 +28,11 @@ HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
 . $HADOOP_LIBEXEC_DIR/hadoop-config.sh
 
 # stop hdfs daemons if hdfs is present
-if [ -f "${HADOOP_HDFS_HOME}"/bin/stop-dfs.sh ]; then
-  "${HADOOP_HDFS_HOME}"/bin/stop-dfs.sh --config $HADOOP_CONF_DIR
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh ]; then
+  "${HADOOP_HDFS_HOME}"/sbin/stop-dfs.sh --config $HADOOP_CONF_DIR
+fi
+
+# stop yarn daemons if yarn is present
+if [ -f "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh ]; then
+  "${HADOOP_HDFS_HOME}"/sbin/stop-yarn.sh --config $HADOOP_CONF_DIR
 fi

+ 54 - 31
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties

@@ -21,7 +21,6 @@ hadoop.root.logger=INFO,console
 hadoop.log.dir=.
 hadoop.log.file=hadoop.log
 
-
 # Define the root logger to the system property "hadoop.root.logger".
 log4j.rootLogger=${hadoop.root.logger}, EventCounter
 
 
@@ -31,6 +30,25 @@ log4j.threshold=ALL
 # Null Appender
 log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
 
+#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
 #
 # Daily Rolling File Appender
 #
@@ -85,54 +103,55 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 #Security appender
 #
 hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
 hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
 log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
 log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
 log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
 log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
 
-
 #
 # hdfs audit logging
 #
 hdfs.audit.logger=INFO,console
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
 log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
 
 #
 # mapred audit logging
 #
 mapred.audit.logger=INFO,console
+mapred.audit.log.maxfilesize=256MB
+mapred.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
 log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
 log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
 log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
 log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
 
 # Custom Logging levels
 
 
@@ -153,16 +172,19 @@ log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
 # Job Summary Appender 
 #
 # Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.log.file :
 # hadoop.mapreduce.jobsummary.logger=INFO,JSA
 # 
 hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
 hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
+hadoop.mapreduce.jobsummary.log.maxbackupindex=20
+log4j.appender.JSA=org.apache.log4j.RollingFileAppender
 log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
 log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
 log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
 log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
 log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 
 
@@ -174,7 +196,7 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 # Set the ResourceManager summary log level and appender
 #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
 
-# Appender for ResourceManager Application Summary Log - rolled daily
+# Appender for ResourceManager Application Summary Log
 # Requires the following properties to be set
 #    - hadoop.log.dir (Hadoop Log directory)
 #    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
@@ -182,8 +204,9 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 
 
 #log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
 #log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
 #log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+#log4j.appender.RMSUMMARY.MaxFileSize=256MB
+#log4j.appender.RMSUMMARY.MaxBackupIndex=20
 #log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
 #log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd
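The "cap space usage at 5gb" comment on the new RFA block follows from the defaults introduced above: 20 backups (hadoop.log.maxbackupindex) x 256 MB (hadoop.log.maxfilesize) = 5120 MB, roughly 5 GB per log in addition to the active file. The security, hdfs/mapred audit, job-summary and (commented-out) RM-summary appenders are given the same 256 MB / 20-backup sizing.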

+ 16 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -145,5 +145,21 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String HA_HM_RPC_TIMEOUT_KEY =
     "ha.health-monitor.rpc-timeout.ms";
   public static final int HA_HM_RPC_TIMEOUT_DEFAULT = 45000;
+  
+  /* Timeout that the FC waits for the new active to become active */
+  public static final String HA_FC_NEW_ACTIVE_TIMEOUT_KEY =
+    "ha.failover-controller.new-active.rpc-timeout.ms";
+  public static final int HA_FC_NEW_ACTIVE_TIMEOUT_DEFAULT = 60000;
+  
+  /* Timeout that the FC waits for the old active to go to standby */
+  public static final String HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY =
+    "ha.failover-controller.graceful-fence.rpc-timeout.ms";
+  public static final int HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT = 5000;
+  
+  /* Timeout that the CLI (manual) FC waits for monitorHealth, getServiceState */
+  public static final String HA_FC_CLI_CHECK_TIMEOUT_KEY =
+    "ha.failover-controller.cli-check.rpc-timeout.ms";
+  public static final int HA_FC_CLI_CHECK_TIMEOUT_DEFAULT = 20000;
+
 }
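The three new failover-controller keys follow the existing health-monitor pattern: each timeout is looked up in the Configuration with a compiled-in default. A minimal sketch of overriding them programmatically, assuming hadoop-common on the classpath; the millisecond values below are illustrative only, not defaults from this change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;

public class FailoverTimeoutsSketch {
  public static Configuration tightenedTimeouts() {
    Configuration conf = new Configuration();
    // How long the FC waits for the new active to respond to transitionToActive (illustrative 30s).
    conf.setInt(CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_KEY, 30000);
    // How long the old active gets to step down gracefully before fencing (illustrative 3s).
    conf.setInt(CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY, 3000);
    // RPC timeout for the manual haadmin monitorHealth/getServiceState checks (illustrative 10s).
    conf.setInt(CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY, 10000);
    return conf;
  }
}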
 
 

+ 37 - 14
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java

@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ipc.RPC;
 
 
@@ -42,7 +43,22 @@ public class FailoverController {
 
 
   private static final Log LOG = LogFactory.getLog(FailoverController.class);
 
-  private static final int GRACEFUL_FENCE_TIMEOUT = 5000;
+  private final int gracefulFenceTimeout;
+  private final int rpcTimeoutToNewActive;
+  
+  private final Configuration conf;
+
+  
+  public FailoverController(Configuration conf) {
+    this.conf = conf;
+    
+    this.gracefulFenceTimeout = conf.getInt(
+        CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_KEY,
+        CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT);
+    this.rpcTimeoutToNewActive = conf.getInt(
+        CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_KEY,
+        CommonConfigurationKeys.HA_FC_NEW_ACTIVE_TIMEOUT_DEFAULT);
+  }
 
   /**
    * Perform pre-failover checks on the given service we plan to
@@ -54,18 +70,25 @@ public class FailoverController {
    * allow it to become active, eg because it triggers a log roll
    * so the standby can learn about new blocks and leave safemode.
    *
+   * @param from currently active service
    * @param target service to make active
    * @param forceActive ignore toSvc if it reports that it is not ready
    * @throws FailoverFailedException if we should avoid failover
    */
-  private static void preFailoverChecks(HAServiceTarget target,
-                                        boolean forceActive)
+  private void preFailoverChecks(HAServiceTarget from,
+                                 HAServiceTarget target,
+                                 boolean forceActive)
       throws FailoverFailedException {
     HAServiceStatus toSvcStatus;
     HAServiceProtocol toSvc;
 
+    if (from.getAddress().equals(target.getAddress())) {
+      throw new FailoverFailedException(
+          "Can't failover a service to itself");
+    }
+
     try {
-      toSvc = target.getProxy();
+      toSvc = target.getProxy(conf, rpcTimeoutToNewActive);
       toSvcStatus = toSvc.getServiceStatus();
     } catch (IOException e) {
       String msg = "Unable to get service state for " + target;
@@ -108,11 +131,10 @@ public class FailoverController {
    * and no retries. Its only purpose is to avoid fencing a node that
    * has already restarted.
    */
-  static boolean tryGracefulFence(Configuration conf,
-      HAServiceTarget svc) {
+  boolean tryGracefulFence(HAServiceTarget svc) {
     HAServiceProtocol proxy = null;
     try {
-      proxy = svc.getProxy(conf, GRACEFUL_FENCE_TIMEOUT);
+      proxy = svc.getProxy(conf, gracefulFenceTimeout);
       proxy.transitionToStandby();
       return true;
     } catch (ServiceFailedException sfe) {
@@ -139,19 +161,19 @@ public class FailoverController {
    * @param forceActive try to make toSvc active even if it is not ready
    * @throws FailoverFailedException if the failover fails
    */
-  public static void failover(HAServiceTarget fromSvc,
-                              HAServiceTarget toSvc,
-                              boolean forceFence,
-                              boolean forceActive)
+  public void failover(HAServiceTarget fromSvc,
+                       HAServiceTarget toSvc,
+                       boolean forceFence,
+                       boolean forceActive)
       throws FailoverFailedException {
     Preconditions.checkArgument(fromSvc.getFencer() != null,
         "failover requires a fencer");
-    preFailoverChecks(toSvc, forceActive);
+    preFailoverChecks(fromSvc, toSvc, forceActive);
 
     // Try to make fromSvc standby
     boolean tryFence = true;
     
-    if (tryGracefulFence(new Configuration(), fromSvc)) {
+    if (tryGracefulFence(fromSvc)) {
       tryFence = forceFence;
     }
 
 
@@ -167,7 +189,8 @@ public class FailoverController {
     boolean failed = false;
     Throwable cause = null;
     try {
-      HAServiceProtocolHelper.transitionToActive(toSvc.getProxy());
+      HAServiceProtocolHelper.transitionToActive(
+          toSvc.getProxy(conf, rpcTimeoutToNewActive));
     } catch (ServiceFailedException sfe) {
       LOG.error("Unable to make " + toSvc + " active (" +
           sfe.getMessage() + "). Failing back.");
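Because failover() and tryGracefulFence() are now instance methods, a caller builds the controller once from a Configuration (which supplies the timeouts above) instead of passing a fresh Configuration per call. A minimal caller sketch, assuming the two HAServiceTarget instances are resolved elsewhere (as HAAdmin does via resolveTarget):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.FailoverController;
import org.apache.hadoop.ha.FailoverFailedException;
import org.apache.hadoop.ha.HAServiceTarget;

public class FailoverCallerSketch {
  static void runFailover(Configuration conf, HAServiceTarget fromNode,
      HAServiceTarget toNode) {
    // Graceful-fence and new-active RPC timeouts are read from conf in the constructor.
    FailoverController fc = new FailoverController(conf);
    try {
      // Fails up front with FailoverFailedException if fromNode and toNode share an
      // address (the new self-failover check); otherwise demotes/fences fromNode and
      // transitions toNode to active.
      fc.failover(fromNode, toNode, false /* forceFence */, false /* forceActive */);
    } catch (FailoverFailedException ffe) {
      System.err.println("Failover failed: " + ffe.getLocalizedMessage());
    }
  }
}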

+ 21 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -30,7 +30,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
 
@@ -49,6 +51,8 @@ public abstract class HAAdmin extends Configured implements Tool {
   private static final String FORCEACTIVE = "forceactive";
   private static final Log LOG = LogFactory.getLog(HAAdmin.class);
 
+  private int rpcTimeoutForChecks = -1;
+  
   private static Map<String, UsageInfo> USAGE =
     ImmutableMap.<String, UsageInfo>builder()
     .put("-transitionToActive",
@@ -165,9 +169,10 @@ public abstract class HAAdmin extends Configured implements Tool {
     HAServiceTarget fromNode = resolveTarget(args[0]);
     HAServiceTarget toNode = resolveTarget(args[1]);
     
+    FailoverController fc = new FailoverController(getConf());
+    
     try {
-      FailoverController.failover(fromNode, toNode,
-          forceFence, forceActive); 
+      fc.failover(fromNode, toNode, forceFence, forceActive); 
       out.println("Failover from "+args[0]+" to "+args[1]+" successful");
     } catch (FailoverFailedException ffe) {
       errOut.println("Failover failed: " + ffe.getLocalizedMessage());
@@ -184,7 +189,8 @@ public abstract class HAAdmin extends Configured implements Tool {
       return -1;
     }
     
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy();
+    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
+        getConf(), rpcTimeoutForChecks);
     try {
       HAServiceProtocolHelper.monitorHealth(proto);
     } catch (HealthCheckFailedException e) {
@@ -202,7 +208,8 @@ public abstract class HAAdmin extends Configured implements Tool {
       return -1;
     }
 
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy();
+    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy(
+        getConf(), rpcTimeoutForChecks);
     out.println(proto.getServiceStatus().getState());
     return 0;
   }
@@ -215,6 +222,16 @@ public abstract class HAAdmin extends Configured implements Tool {
     return serviceId;
   }
 
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    if (conf != null) {
+      rpcTimeoutForChecks = conf.getInt(
+          CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_KEY,
+          CommonConfigurationKeys.HA_FC_CLI_CHECK_TIMEOUT_DEFAULT);
+    }
+  }
+
   @Override
   public int run(String[] argv) throws Exception {
     try {

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java

@@ -330,8 +330,8 @@ public abstract class ZKFailoverController implements Tool {
       HAServiceTarget target = dataToTarget(data);
       
       LOG.info("Should fence: " + target);
-      boolean gracefulWorked =
-        FailoverController.tryGracefulFence(conf, target);
+      boolean gracefulWorked = new FailoverController(conf)
+          .tryGracefulFence(target);
       if (gracefulWorked) {
         // It's possible that it's in standby but just about to go into active,
         // no? Is there some race here?

+ 13 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java

@@ -570,31 +570,29 @@ public class NetUtils {
     }
   }
 
-  private static final Pattern ipPattern = // Pattern for matching hostname to ip:port
-    Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:?\\d*");
+  private static final Pattern ipPortPattern = // Pattern for matching ip[:port]
+    Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d+)?");
   
   /**
-   * Attempt to obtain the host name of a name specified by ip address.  
-   * Check that the node name is an ip addr and if so, attempt to determine
-   * its host name.  If the name is not an IP addr, or the actual name cannot
-   * be determined, return null.
+   * Attempt to obtain the host name of the given string which contains
+   * an IP address and an optional port.
    * 
-   * @return Host name or null
+   * @param ipPort string of form ip[:port]
+   * @return Host name or null if the name can not be determined
    */
-  public static String getHostNameOfIP(String ip) {
-    // If name is not an ip addr, don't bother looking it up
-    if(!ipPattern.matcher(ip).matches())
+  public static String getHostNameOfIP(String ipPort) {
+    if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) {
       return null;
+    }
     
-    String hostname = "";
     try {
-      String n = ip.substring(0, ip.indexOf(':'));
-      hostname = InetAddress.getByName(n).getHostName();
+      int colonIdx = ipPort.indexOf(':');
+      String ip = (-1 == colonIdx) ? ipPort
+          : ipPort.substring(0, ipPort.indexOf(':'));
+      return InetAddress.getByName(ip).getHostName();
     } catch (UnknownHostException e) {
       return null;
     }
-    
-    return hostname; 
   }
 
   /**
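A small illustration of the corrected getHostNameOfIP behavior (HADOOP-8238); the inputs below are illustrative and the resolved names depend on the local resolver:

import org.apache.hadoop.net.NetUtils;

public class HostNameOfIPExample {
  public static void main(String[] args) {
    // Before this change a bare IP (no ":port") hit indexOf(':') == -1 and threw
    // StringIndexOutOfBoundsException; both forms are now accepted.
    System.out.println(NetUtils.getHostNameOfIP("127.0.0.1"));       // e.g. "localhost"
    System.out.println(NetUtils.getHostNameOfIP("127.0.0.1:8020"));  // same host name
    System.out.println(NetUtils.getHostNameOfIP("example.com"));     // null: not an IP literal
    System.out.println(NetUtils.getHostNameOfIP(null));              // null: now guarded explicitly
  }
}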

+ 3 - 3
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh

@@ -48,10 +48,10 @@ done
 export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
 
 # Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"
-HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"
+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_NAMENODE_OPTS"
+export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
 
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
 
 # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
 export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"

+ 54 - 31
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties

@@ -21,7 +21,6 @@ hadoop.root.logger=INFO,console
 hadoop.log.dir=.
 hadoop.log.file=hadoop.log
 
-
 # Define the root logger to the system property "hadoop.root.logger".
 log4j.rootLogger=${hadoop.root.logger}, EventCounter
 
 
@@ -31,6 +30,25 @@ log4j.threshold=ALL
 # Null Appender
 log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
 
+#
+# Rolling File Appender - cap space usage at 5gb.
+#
+hadoop.log.maxfilesize=256MB
+hadoop.log.maxbackupindex=20
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
 #
 # Daily Rolling File Appender
 #
@@ -85,54 +103,55 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 #Security appender
 #
 hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
 log4j.category.SecurityLogger=${hadoop.security.logger}
 hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# Daily Rolling Security appender
+#
 log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
 log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
 log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
 log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
 log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
 
-
 #
 # hdfs audit logging
 #
 hdfs.audit.logger=INFO,console
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
 log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
 
 #
 # mapred audit logging
 #
 mapred.audit.logger=INFO,console
+mapred.audit.log.maxfilesize=256MB
+mapred.audit.log.maxbackupindex=20
 log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
 log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT=org.apache.log4j.RollingFileAppender
 log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
 log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
 log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
+log4j.appender.MRAUDIT.MaxFileSize=${mapred.audit.log.maxfilesize}
+log4j.appender.MRAUDIT.MaxBackupIndex=${mapred.audit.log.maxbackupindex}
 
 # Custom Logging levels
 
 
@@ -153,16 +172,19 @@ log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
 # Job Summary Appender 
 #
 # Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.log.file :
 # hadoop.mapreduce.jobsummary.logger=INFO,JSA
 # 
 hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
 hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+hadoop.mapreduce.jobsummary.log.maxfilesize=256MB
+hadoop.mapreduce.jobsummary.log.maxbackupindex=20
+log4j.appender.JSA=org.apache.log4j.RollingFileAppender
 log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
+log4j.appender.JSA.MaxFileSize=${hadoop.mapreduce.jobsummary.log.maxfilesize}
+log4j.appender.JSA.MaxBackupIndex=${hadoop.mapreduce.jobsummary.log.maxbackupindex}
 log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
 log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
 log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
 log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 
 
@@ -174,7 +196,7 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 # Set the ResourceManager summary log level and appender
 #yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
 
-# Appender for ResourceManager Application Summary Log - rolled daily
+# Appender for ResourceManager Application Summary Log
 # Requires the following properties to be set
 #    - hadoop.log.dir (Hadoop Log directory)
 #    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
@@ -182,8 +204,9 @@ log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
 
 
 #log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
 #log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-#log4j.appender.RMSUMMARY=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
 #log4j.appender.RMSUMMARY.File=${hadoop.log.dir}/${yarn.server.resourcemanager.appsummary.log.file}
+#log4j.appender.RMSUMMARY.MaxFileSize=256MB
+#log4j.appender.RMSUMMARY.MaxBackupIndex=20
 #log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
 #log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-#log4j.appender.RMSUMMARY.DatePattern=.yyyy-MM-dd

+ 0 - 2
hadoop-common-project/hadoop-common/src/site/apt/DeprecatedProperties.apt.vm

@@ -86,8 +86,6 @@ Deprecated Properties
 *---+---+
 |dfs.socket.timeout | dfs.client.socket-timeout
 *---+---+
-|dfs.upgrade.permission | dfs.namenode.upgrade.permission
-*---+---+
 |dfs.write.packet.size | dfs.client-write-packet-size
 *---+---+
 |fs.checkpoint.dir | dfs.namenode.checkpoint.dir

+ 67 - 20
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestFailoverController.java

@@ -25,11 +25,13 @@ import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.verify;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.ha.TestNodeFencer.AlwaysSucceedFencer;
 import org.apache.hadoop.ha.TestNodeFencer.AlwaysFailFencer;
 import static org.apache.hadoop.ha.TestNodeFencer.setupFencer;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.test.MockitoUtil;
 
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -40,6 +42,8 @@ import static org.junit.Assert.*;
 public class TestFailoverController {
   private InetSocketAddress svc1Addr = new InetSocketAddress("svc1", 1234); 
   private InetSocketAddress svc2Addr = new InetSocketAddress("svc2", 5678);
+  
+  private Configuration conf = new Configuration();
 
   HAServiceStatus STATE_NOT_READY = new HAServiceStatus(HAServiceState.STANDBY)
       .setNotReadyToBecomeActive("injected not ready");
@@ -51,13 +55,13 @@ public class TestFailoverController {
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
     AlwaysSucceedFencer.fenceCalled = 0;
-    FailoverController.failover(svc1, svc2, false, false);
+    doFailover(svc1, svc2, false, false);
     assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
     assertEquals(HAServiceState.STANDBY, svc1.state);
     assertEquals(HAServiceState.ACTIVE, svc2.state);
 
     AlwaysSucceedFencer.fenceCalled = 0;
-    FailoverController.failover(svc2, svc1, false, false);
+    doFailover(svc2, svc1, false, false);
     assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
     assertEquals(HAServiceState.ACTIVE, svc1.state);
     assertEquals(HAServiceState.STANDBY, svc2.state);
@@ -69,7 +73,7 @@ public class TestFailoverController {
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
 
-    FailoverController.failover(svc1, svc2, false, false);
+    doFailover(svc1, svc2, false, false);
     assertEquals(HAServiceState.STANDBY, svc1.state);
     assertEquals(HAServiceState.STANDBY, svc1.state);
     assertEquals(HAServiceState.ACTIVE, svc2.state);
     assertEquals(HAServiceState.ACTIVE, svc2.state);
   }
   }
@@ -81,7 +85,7 @@ public class TestFailoverController {
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
 
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
       fail("Can't failover to an already active service");
       fail("Can't failover to an already active service");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       // Expected
       // Expected
@@ -102,7 +106,7 @@ public class TestFailoverController {
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
 
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
       fail("Can't failover when access is denied");
       fail("Can't failover when access is denied");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       assertTrue(ffe.getCause().getMessage().contains("Access denied"));
       assertTrue(ffe.getCause().getMessage().contains("Access denied"));
@@ -118,7 +122,7 @@ public class TestFailoverController {
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
 
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
       fail("Can't failover to a service that's not ready");
       fail("Can't failover to a service that's not ready");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       // Expected
       // Expected
@@ -131,7 +135,7 @@ public class TestFailoverController {
     assertEquals(HAServiceState.STANDBY, svc2.state);
     assertEquals(HAServiceState.STANDBY, svc2.state);
 
 
     // Forcing it means we ignore readyToBecomeActive
     // Forcing it means we ignore readyToBecomeActive
-    FailoverController.failover(svc1, svc2, false, true);
+    doFailover(svc1, svc2, false, true);
     assertEquals(HAServiceState.STANDBY, svc1.state);
     assertEquals(HAServiceState.STANDBY, svc1.state);
     assertEquals(HAServiceState.ACTIVE, svc2.state);
     assertEquals(HAServiceState.ACTIVE, svc2.state);
   }
   }
@@ -145,7 +149,7 @@ public class TestFailoverController {
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
 
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
       fail("Failover to unhealthy service");
       fail("Failover to unhealthy service");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       // Expected
       // Expected
@@ -165,7 +169,7 @@ public class TestFailoverController {
 
 
     AlwaysSucceedFencer.fenceCalled = 0;
     AlwaysSucceedFencer.fenceCalled = 0;
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       fail("Faulty active prevented failover");
       fail("Faulty active prevented failover");
     }
     }
@@ -188,7 +192,7 @@ public class TestFailoverController {
 
 
     AlwaysFailFencer.fenceCalled = 0;
     AlwaysFailFencer.fenceCalled = 0;
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
       fail("Failed over even though fencing failed");
       fail("Failed over even though fencing failed");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       // Expected
       // Expected
@@ -208,7 +212,7 @@ public class TestFailoverController {
 
 
     AlwaysFailFencer.fenceCalled = 0;
     AlwaysFailFencer.fenceCalled = 0;
     try {
     try {
-      FailoverController.failover(svc1, svc2, true, false);
+      doFailover(svc1, svc2, true, false);
       fail("Failed over even though fencing requested and failed");
       fail("Failed over even though fencing requested and failed");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       // Expected
       // Expected
@@ -232,16 +236,26 @@ public class TestFailoverController {
           .defaultAnswer(new ThrowsException(
           .defaultAnswer(new ThrowsException(
               new IOException("Could not connect to host")))
               new IOException("Could not connect to host")))
           .extraInterfaces(Closeable.class));
           .extraInterfaces(Closeable.class));
-    Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy();
+    Mockito.doNothing().when((Closeable)errorThrowingProxy).close();
+
+    Mockito.doReturn(errorThrowingProxy).when(svc1).getProxy(
+        Mockito.<Configuration>any(),
+        Mockito.anyInt());
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
 
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       fail("Non-existant active prevented failover");
       fail("Non-existant active prevented failover");
     }
     }
-
+    // Verify that the proxy created to try to make it go to standby
+    // gracefully used the right rpc timeout
+    Mockito.verify(svc1).getProxy(
+        Mockito.<Configuration>any(),
+        Mockito.eq(
+          CommonConfigurationKeys.HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT));
+        
     // Don't check svc1 because we can't reach it, but that's OK, it's been fenced.
     // Don't check svc1 because we can't reach it, but that's OK, it's been fenced.
     assertEquals(HAServiceState.ACTIVE, svc2.state);
     assertEquals(HAServiceState.ACTIVE, svc2.state);
   }
   }
@@ -256,7 +270,7 @@ public class TestFailoverController {
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
 
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
       fail("Failed over to a non-existant standby");
       fail("Failed over to a non-existant standby");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       // Expected
       // Expected
@@ -274,7 +288,7 @@ public class TestFailoverController {
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
 
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
       fail("Failover to already active service");
       fail("Failover to already active service");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       // Expected
       // Expected
@@ -296,7 +310,7 @@ public class TestFailoverController {
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
 
     try {
     try {
-      FailoverController.failover(svc1, svc2, true, false);
+      doFailover(svc1, svc2, true, false);
       fail("Failed over to service that won't transition to active");
       fail("Failed over to service that won't transition to active");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       // Expected
       // Expected
@@ -318,7 +332,7 @@ public class TestFailoverController {
     AlwaysSucceedFencer.fenceCalled = 0;
     AlwaysSucceedFencer.fenceCalled = 0;
 
 
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
       fail("Failed over to service that won't transition to active");
       fail("Failed over to service that won't transition to active");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       // Expected
       // Expected
@@ -342,7 +356,7 @@ public class TestFailoverController {
     AlwaysFailFencer.fenceCalled = 0;
     AlwaysFailFencer.fenceCalled = 0;
 
 
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
       fail("Failed over to service that won't transition to active");
       fail("Failed over to service that won't transition to active");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       // Expected
       // Expected
@@ -368,7 +382,7 @@ public class TestFailoverController {
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
     svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
 
 
     try {
     try {
-      FailoverController.failover(svc1, svc2, false, false);
+      doFailover(svc1, svc2, false, false);
       fail("Failover to already active service");
       fail("Failover to already active service");
     } catch (FailoverFailedException ffe) {
     } catch (FailoverFailedException ffe) {
       // Expected
       // Expected
@@ -377,4 +391,37 @@ public class TestFailoverController {
     assertEquals(HAServiceState.STANDBY, svc1.state);
     assertEquals(HAServiceState.STANDBY, svc1.state);
     assertEquals(HAServiceState.STANDBY, svc2.state);
     assertEquals(HAServiceState.STANDBY, svc2.state);
   }
   }
+
+  @Test
+  public void testSelfFailoverFails() throws Exception {
+    DummyHAService svc1 = new DummyHAService(HAServiceState.ACTIVE, svc1Addr);
+    DummyHAService svc2 = new DummyHAService(HAServiceState.STANDBY, svc2Addr);
+    svc1.fencer = svc2.fencer = setupFencer(AlwaysSucceedFencer.class.getName());
+    AlwaysSucceedFencer.fenceCalled = 0;
+
+    try {
+      doFailover(svc1, svc1, false, false);
+      fail("Can't failover to yourself");
+    } catch (FailoverFailedException ffe) {
+      // Expected
+    }
+    assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
+    assertEquals(HAServiceState.ACTIVE, svc1.state);
+
+    try {
+      doFailover(svc2, svc2, false, false);
+      fail("Can't failover to yourself");
+    } catch (FailoverFailedException ffe) {
+      // Expected
+    }
+    assertEquals(0, TestNodeFencer.AlwaysSucceedFencer.fenceCalled);
+    assertEquals(HAServiceState.STANDBY, svc2.state);
+  }
+  
+  private void doFailover(HAServiceTarget tgt1, HAServiceTarget tgt2,
+      boolean forceFence, boolean forceActive) throws FailoverFailedException {
+    FailoverController fc = new FailoverController(conf);
+    fc.failover(tgt1, tgt2, forceFence, forceActive);
+  }
+
 }
 }

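Note: as the new doFailover() helper above shows, FailoverController is now constructed with a Configuration and failover() is an instance method, so the graceful-fence RPC timeout is taken from configuration (the test verifies HA_FC_GRACEFUL_FENCE_TIMEOUT_DEFAULT is used). A minimal sketch of the new calling convention, assuming two already-resolved HAServiceTarget instances named fromNode and toNode (placeholder names, not part of this patch):

    // Sketch only: fromNode and toNode stand in for real HAServiceTarget objects.
    Configuration conf = new Configuration();
    FailoverController fc = new FailoverController(conf);
    // forceFence=false, forceActive=false, matching the graceful cases in the tests above;
    // a FailoverFailedException is thrown if the transition cannot be completed.
    fc.failover(fromNode, toNode, false, false);
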
+ 12 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java

@@ -499,6 +499,18 @@ public class TestNetUtils {
     assertEquals("scheme://host.a.b/path", uri.toString());
   }
   
+  @Test
+  public void testGetHostNameOfIP() {
+    assertNull(NetUtils.getHostNameOfIP(null));
+    assertNull(NetUtils.getHostNameOfIP(""));
+    assertNull(NetUtils.getHostNameOfIP("crazytown"));
+    assertNull(NetUtils.getHostNameOfIP("127.0.0.1:"));   // no port
+    assertNull(NetUtils.getHostNameOfIP("127.0.0.1:-1")); // bogus port
+    assertNull(NetUtils.getHostNameOfIP("127.0.0.1:A"));  // bogus port
+    assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1"));
+    assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1:1"));
+  }
+
   private <T> void assertBetterArrayEquals(T[] expect, T[]got) {
     String expectStr = StringUtils.join(expect, ", ");
     String gotStr = StringUtils.join(got, ", ");

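Note: the new test pins down the contract of NetUtils.getHostNameOfIP: a well-formed "ip" or "ip:port" string resolves to a hostname, while null, empty, or malformed input returns null rather than throwing. A small illustrative sketch (the fallback label is an assumption for display purposes, not from this patch):

    // Returns a hostname for "127.0.0.1" / "127.0.0.1:1"; null for malformed input.
    String host = NetUtils.getHostNameOfIP("127.0.0.1:1");
    String label = (host != null) ? host : "unknown-host";
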
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/sbin/httpfs.sh

@@ -55,8 +55,8 @@ if [ "${1}" = "stop" ]; then
 fi
 
 if [ "${HTTPFS_SILENT}" != "true" ]; then
-  ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@"
+  exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@"
 else
-  ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" > /dev/null
+  exec ${HTTPFS_CATALINA_HOME}/bin/catalina.sh "$@" > /dev/null
 fi
 
 

+ 32 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -117,6 +117,12 @@ Release 2.0.0 - UNRELEASED
 
     HDFS-2303. Unbundle jsvc. (Roman Shaposhnik and Mingjie Lai via eli)
 
+    HDFS-3137. Bump LAST_UPGRADABLE_LAYOUT_VERSION to -16. (eli)
+    
+    HDFS-3138. Move DatanodeInfo#ipcPort to DatanodeID. (eli)
+
+    HDFS-3164. Move DatanodeInfo#hostName to DatanodeID. (eli)
+
   NEW FEATURES
 
     HDFS-2978. The NameNode should expose name dir statuses via JMX. (atm)
@@ -171,6 +177,8 @@
     DistributedFileSystem to @InterfaceAudience.LimitedPrivate.
     (harsh via szetszwo)
 
+    HDFS-3167. CLI-based driver for MiniDFSCluster. (Henry Robinson via atm)
+
   IMPROVEMENTS
 
     HDFS-2018. Move all journal stream management code into one place.
@@ -279,6 +287,15 @@
 
     HDFS-3155. Clean up FSDataset implemenation related code.  (szetszwo)
 
+    HDFS-3158. LiveNodes member of NameNodeMXBean should list non-DFS used
+    space and capacity per DN. (atm)
+
+    HDFS-3172. dfs.upgrade.permission is dead code. (eli)
+
+    HDFS-3171. The DatanodeID "name" field is overloaded. (eli)
+
+    HDFS-3144. Refactor DatanodeID#getName by use. (eli)
+
   OPTIMIZATIONS
 
     HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -366,6 +383,15 @@
 
     HDFS-3143. TestGetBlocks.testGetBlocks is failing. (Arpit Gupta via atm)
 
+    HDFS-3142. TestHDFSCLI.testAll is failing. (Brandon Li via atm)
+
+    HDFS-3070. HDFS balancer doesn't ensure that hdfs-site.xml is loaded. (atm)
+
+    HDFS-2995. start-dfs.sh should only start the 2NN for namenodes
+    with dfs.namenode.secondary.http-address configured. (eli)
+
+    HDFS-3174. Fix assert in TestPendingDataNodeMessages. (eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -713,6 +739,9 @@ Release 0.23.2 - UNRELEASED
 
     HDFS-3104. Add tests for HADOOP-8175. (Daryn Sharp via szetszwo)
 
+    HDFS-3066. Cap space usage of default log4j rolling policy.
+    (Patrick Hunt via eli)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -764,6 +793,9 @@
 
     HDFS-3101. Cannot read empty file using WebHDFS.  (szetszwo)
 
+    HDFS-3160. httpfs should exec catalina instead of forking it.
+    (Roman Shaposhnik via eli)
+
 Release 0.23.1 - 2012-02-17 
 
   INCOMPATIBLE CHANGES

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs

@@ -120,7 +120,7 @@ export CLASSPATH=$CLASSPATH
 
 #turn security logger on the namenode
 if [ $COMMAND = "namenode" ]; then
-  HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,DRFAS}"
+  HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS}"
 else
   HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
 fi

+ 7 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh

@@ -76,11 +76,13 @@ fi
 
 SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
 
-echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
+if [ -n "$SECONDARY_NAMENODES" ]; then
+  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
 
-"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --hostnames "$SECONDARY_NAMENODES" \
-    --script "$bin/hdfs" start secondarynamenode
+  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+      --config "$HADOOP_CONF_DIR" \
+      --hostnames "$SECONDARY_NAMENODES" \
+      --script "$bin/hdfs" start secondarynamenode
+fi
 
 # eof

+ 7 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh

@@ -52,11 +52,13 @@ fi
 
 SECONDARY_NAMENODES=$($HADOOP_PREFIX/bin/hdfs getconf -secondarynamenodes 2>&-)
 
-echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+if [ -n "$SECONDARY_NAMENODES" ]; then
+  echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
 
-"$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
-    --config "$HADOOP_CONF_DIR" \
-    --hostnames "$SECONDARY_NAMENODES" \
-    --script "$bin/hdfs" stop secondarynamenode
+  "$HADOOP_PREFIX/sbin/hadoop-daemons.sh" \
+      --config "$HADOOP_CONF_DIR" \
+      --hostnames "$SECONDARY_NAMENODES" \
+      --script "$bin/hdfs" stop secondarynamenode
+fi
 
 # eof

+ 0 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml

@@ -239,11 +239,6 @@ to the web server.</p>
 	<br />The name of the group of super-users.
 	</li>
 
-	<li><code>dfs.namenode.upgrade.permission = 0777</code>
-	<br />The choice of initial mode during upgrade. The <em>x</em> permission is <em>never</em> set for files. 
-		For configuration files, the decimal value <em>511<sub>10</sub></em> may be used.
-    </li>
-    
 	<li><code>fs.permissions.umask-mode = 022</code>
     <br />The <code>umask</code> used when creating files and directories. For configuration files, the decimal 
 		value <em>18<sub>10</sub></em> may be used.

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java

@@ -240,7 +240,7 @@ class BlockReaderLocal implements BlockReader {
   private static BlockLocalPathInfo getBlockPathInfo(ExtendedBlock blk,
       DatanodeInfo node, Configuration conf, int timeout,
       Token<BlockTokenIdentifier> token) throws IOException {
-    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.ipcPort);
+    LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
     BlockLocalPathInfo pathinfo = null;
     ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(node,
         conf, timeout);

+ 7 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -1340,7 +1340,8 @@ public class DFSClient implements java.io.Closeable {
           //connect to a datanode
           sock = socketFactory.createSocket();
           NetUtils.connect(sock,
-              NetUtils.createSocketAddr(datanodes[j].getName()), timeout);
+              NetUtils.createSocketAddr(datanodes[j].getXferAddr()),
+              timeout);
           sock.setSoTimeout(timeout);
 
           out = new DataOutputStream(
@@ -1349,7 +1350,7 @@
           in = new DataInputStream(NetUtils.getInputStream(sock));
 
           if (LOG.isDebugEnabled()) {
-            LOG.debug("write to " + datanodes[j].getName() + ": "
+            LOG.debug("write to " + datanodes[j] + ": "
                 + Op.BLOCK_CHECKSUM + ", block=" + block);
           }
           // get block MD5
@@ -1364,7 +1365,7 @@
               if (LOG.isDebugEnabled()) {
                 LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
                     + "for file " + src + " for block " + block
-                    + " from datanode " + datanodes[j].getName()
+                    + " from datanode " + datanodes[j]
                     + ". Will retry the block once.");
               }
               lastRetriedIndex = i;
@@ -1374,7 +1375,7 @@
               break;
             } else {
               throw new IOException("Bad response " + reply + " for block "
-                  + block + " from datanode " + datanodes[j].getName());
+                  + block + " from datanode " + datanodes[j]);
             }
           }
           
@@ -1409,12 +1410,10 @@
               LOG.debug("set bytesPerCRC=" + bytesPerCRC
                   + ", crcPerBlock=" + crcPerBlock);
             }
-            LOG.debug("got reply from " + datanodes[j].getName()
-                + ": md5=" + md5);
+            LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
           }
         } catch (IOException ie) {
-          LOG.warn("src=" + src + ", datanodes[" + j + "].getName()="
-              + datanodes[j].getName(), ie);
+          LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
         } finally {
           IOUtils.closeStream(in);
           IOUtils.closeStream(out);

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -107,8 +107,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final long    DFS_NAMENODE_CHECKPOINT_PERIOD_DEFAULT = 3600;
   public static final String  DFS_NAMENODE_CHECKPOINT_TXNS_KEY = "dfs.namenode.checkpoint.txns";
   public static final long    DFS_NAMENODE_CHECKPOINT_TXNS_DEFAULT = 40000;
-  public static final String  DFS_NAMENODE_UPGRADE_PERMISSION_KEY = "dfs.namenode.upgrade.permission";
-  public static final int     DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT = 00777;
   public static final String  DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY = "dfs.namenode.heartbeat.recheck-interval";
   public static final int     DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT = 5*60*1000;
   public static final String  DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY = "dfs.client.https.keystore.resource";

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -543,7 +543,7 @@ public class DFSInputStream extends FSInputStream implements ByteBufferReadable
         return reader.doRead(blockReader, off, len);
       } catch ( ChecksumException ce ) {
         DFSClient.LOG.warn("Found Checksum error for "
-            + getCurrentBlock() + " from " + currentNode.getName()
+            + getCurrentBlock() + " from " + currentNode
             + " at " + ce.getPos());        
         ioe = ce;
         retryCurrentNode = false;
@@ -671,7 +671,7 @@
       try {
         DatanodeInfo chosenNode = bestNode(nodes, deadNodes);
         InetSocketAddress targetAddr = 
-                          NetUtils.createSocketAddr(chosenNode.getName());
+          NetUtils.createSocketAddr(chosenNode.getXferAddr());
         return new DNAddrPair(chosenNode, targetAddr);
       } catch (IOException ie) {
         String blockInfo = block.getBlock() + " file=" + src;
@@ -746,7 +746,7 @@
       } catch (ChecksumException e) {
         DFSClient.LOG.warn("fetchBlockByteRange(). Got a checksum exception for " +
                  src + " at " + block.getBlock() + ":" + 
-                 e.getPos() + " from " + chosenNode.getName());
+                 e.getPos() + " from " + chosenNode);
         // we want to remember what we have tried
         addIntoCorruptedBlockMap(block.getBlock(), chosenNode, corruptedBlockMap);
       } catch (AccessControlException ex) {

+ 8 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -667,7 +667,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
                 throw new IOException("Bad response " + reply +
                     " for block " + block +
                     " from datanode " + 
-                    targets[i].getName());
+                    targets[i]);
               }
             }
             
@@ -898,7 +898,7 @@
         if (errorIndex >= 0) {
           StringBuilder pipelineMsg = new StringBuilder();
           for (int j = 0; j < nodes.length; j++) {
-            pipelineMsg.append(nodes[j].getName());
+            pipelineMsg.append(nodes[j]);
             if (j < nodes.length - 1) {
               pipelineMsg.append(", ");
             }
@@ -911,7 +911,7 @@
           }
           DFSClient.LOG.warn("Error Recovery for block " + block +
               " in pipeline " + pipelineMsg + 
-              ": bad datanode " + nodes[errorIndex].getName());
+              ": bad datanode " + nodes[errorIndex]);
           failed.add(nodes[errorIndex]);
 
           DatanodeInfo[] newnodes = new DatanodeInfo[nodes.length-1];
@@ -1005,7 +1005,7 @@
       String firstBadLink = "";
       if (DFSClient.LOG.isDebugEnabled()) {
         for (int i = 0; i < nodes.length; i++) {
-          DFSClient.LOG.debug("pipeline = " + nodes[i].getName());
+          DFSClient.LOG.debug("pipeline = " + nodes[i]);
         }
       }
 
@@ -1061,7 +1061,7 @@
         // find the datanode that matches
         if (firstBadLink.length() != 0) {
           for (int i = 0; i < nodes.length; i++) {
-            if (nodes[i].getName().equals(firstBadLink)) {
+            if (nodes[i].getXferAddr().equals(firstBadLink)) {
              errorIndex = i;
              break;
            }
@@ -1165,9 +1165,10 @@
   static Socket createSocketForPipeline(final DatanodeInfo first,
       final int length, final DFSClient client) throws IOException {
     if(DFSClient.LOG.isDebugEnabled()) {
-      DFSClient.LOG.debug("Connecting to datanode " + first.getName());
+      DFSClient.LOG.debug("Connecting to datanode " + first);
     }
-    final InetSocketAddress isa = NetUtils.createSocketAddr(first.getName());
+    final InetSocketAddress isa =
+      NetUtils.createSocketAddr(first.getXferAddr());
     final Socket sock = client.socketFactory.createSocket();
     final int timeout = client.getDatanodeReadTimeout(length);
     NetUtils.connect(sock, isa, timeout);

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -295,16 +295,16 @@ public class DFSUtil {
       assert idx < nrBlocks : "Incorrect index";
       DatanodeInfo[] locations = blk.getLocations();
       String[] hosts = new String[locations.length];
-      String[] names = new String[locations.length];
+      String[] xferAddrs = new String[locations.length];
       String[] racks = new String[locations.length];
       for (int hCnt = 0; hCnt < locations.length; hCnt++) {
         hosts[hCnt] = locations[hCnt].getHostName();
-        names[hCnt] = locations[hCnt].getName();
-        NodeBase node = new NodeBase(names[hCnt], 
+        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
+        NodeBase node = new NodeBase(xferAddrs[hCnt], 
                                      locations[hCnt].getNetworkLocation());
         racks[hCnt] = node.toString();
       }
-      blkLocations[idx] = new BlockLocation(names, hosts, racks,
+      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, racks,
                                             blk.getStartOffset(),
                                             blk.getBlockSize(),
                                             blk.isCorrupt());

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -688,7 +688,7 @@ public class DistributedFileSystem extends FileSystem {
     lblocks[0] = new LocatedBlock(dataBlock, dataNode);
     LOG.info("Found checksum error in data stream at block="
         + dataBlock + " on datanode="
-        + dataNode[0].getName());
+        + dataNode[0]);
 
     // Find block in checksum stream
     DFSClient.DFSDataInputStream dfsSums = (DFSClient.DFSDataInputStream) sums;
@@ -700,8 +700,7 @@
     DatanodeInfo[] sumsNode = {dfsSums.getCurrentDatanode()}; 
     lblocks[1] = new LocatedBlock(sumsBlock, sumsNode);
     LOG.info("Found checksum error in checksum stream at block="
-        + sumsBlock + " on datanode="
-        + sumsNode[0].getName());
+        + sumsBlock + " on datanode=" + sumsNode[0]);
 
     // Ask client to delete blocks.
     dfs.reportChecksumFailure(f.toString(), lblocks);

+ 0 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java

@@ -86,7 +86,6 @@ public class HdfsConfiguration extends Configuration {
     deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
     deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
     deprecate("fs.checkpoint.period", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY);
-    deprecate("dfs.upgrade.permission", DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY);
     deprecate("heartbeat.recheck.interval", DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY);
     deprecate("dfs.https.client.keystore.resource", DFSConfigKeys.DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY);
     deprecate("dfs.https.need.client.auth", DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY);

+ 112 - 64
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java

@@ -24,7 +24,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.hdfs.DeprecatedUTF8;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableComparable;
 
 /**
@@ -32,22 +32,32 @@ import org.apache.hadoop.io.WritableComparable;
  * Datanodes are identified by how they can be contacted (hostname
  * and ports) and their storage ID, a unique number that associates
  * the Datanodes blocks with a particular Datanode.
+ *
+ * {@link DatanodeInfo#getName()} should be used to get the network
+ * location (for topology) of a datanode, instead of using
+ * {@link DatanodeID#getXferAddr()} here. Helpers are defined below
+ * for each context in which a DatanodeID is used.
 */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class DatanodeID implements WritableComparable<DatanodeID> {
   public static final DatanodeID[] EMPTY_ARRAY = {}; 
 
-  public String name;       // hostname:port (data transfer port)
-  public String storageID;  // unique per cluster storageID
-  protected int infoPort;   // info server port
-  public int ipcPort;       // ipc server port
+  protected String ipAddr;     // IP address
+  protected String hostName;   // hostname
+  protected String storageID;  // unique per cluster storageID
+  protected int xferPort;      // data streaming port
+  protected int infoPort;      // info server port
+  protected int ipcPort;       // IPC server port
 
   /** Equivalent to DatanodeID(""). */
   public DatanodeID() {this("");}
 
-  /** Equivalent to DatanodeID(nodeName, "", -1, -1). */
-  public DatanodeID(String nodeName) {this(nodeName, "", -1, -1);}
+  /** Equivalent to DatanodeID(ipAddr, "", -1, -1, -1). */
+  public DatanodeID(String ipAddr) {this(ipAddr, "", "", -1, -1, -1);}
+
+  /** Equivalent to DatanodeID(ipAddr, "", xferPort, -1, -1). */
+  public DatanodeID(String ipAddr, int xferPort) {this(ipAddr, "", "", xferPort, -1, -1);}
 
   /**
    * DatanodeID copy constructor
@@ -55,29 +65,43 @@ public class DatanodeID implements WritableComparable<DatanodeID> {
   * @param from
   */
   public DatanodeID(DatanodeID from) {
-    this(from.getName(),
+    this(from.getIpAddr(),
+        from.getHostName(),
         from.getStorageID(),
+        from.getXferPort(),
         from.getInfoPort(),
         from.getIpcPort());
   }
   
   /**
    * Create DatanodeID
-   * @param nodeName (hostname:portNumber) 
+   * @param ipAddr IP
+   * @param hostName hostname
    * @param storageID data storage ID
+   * @param xferPort data transfer port
    * @param infoPort info server port 
    * @param ipcPort ipc server port
   */
-  public DatanodeID(String nodeName, String storageID,
-      int infoPort, int ipcPort) {
-    this.name = nodeName;
+  public DatanodeID(String ipAddr, String hostName, String storageID,
+      int xferPort, int infoPort, int ipcPort) {
+    this.ipAddr = ipAddr;
+    this.hostName = hostName;
     this.storageID = storageID;
+    this.xferPort = xferPort;
     this.infoPort = infoPort;
     this.ipcPort = ipcPort;
   }
   
-  public void setName(String name) {
-    this.name = name;
+  public void setIpAddr(String ipAddr) {
+    this.ipAddr = ipAddr;
+  }
+
+  public void setHostName(String hostName) {
+    this.hostName = hostName;
+  }
+
+  public void setXferPort(int xferPort) {
+    this.xferPort = xferPort;
   }
 
   public void setInfoPort(int infoPort) {
@@ -87,60 +111,79 @@ public class DatanodeID implements WritableComparable<DatanodeID> {
   public void setIpcPort(int ipcPort) {
     this.ipcPort = ipcPort;
   }
-  
+
+  public void setStorageID(String storageID) {
+    this.storageID = storageID;
+  }
+
   /**
-   * @return hostname:portNumber.
+   * @return ipAddr;
   */
-  public String getName() {
-    return name;
+  public String getIpAddr() {
+    return ipAddr;
  }
-  
+
   /**
-   * @return data storage ID.
+   * @return hostname
   */
-  public String getStorageID() {
-    return this.storageID;
+  public String getHostName() {
+    return hostName;
  }
 
   /**
-   * @return infoPort (the port at which the HTTP server bound to)
+   * @return IP:xferPort string
   */
-  public int getInfoPort() {
-    return infoPort;
+  public String getXferAddr() {
+    return ipAddr + ":" + xferPort;
  }
 
   /**
-   * @return ipcPort (the port at which the IPC server bound to)
+   * @return IP:ipcPort string
   */
-  public int getIpcPort() {
-    return ipcPort;
+  public String getIpcAddr() {
+    return ipAddr + ":" + ipcPort;
  }
 
   /**
-   * sets the data storage ID.
+   * @return IP:infoPort string
   */
-  public void setStorageID(String storageID) {
-    this.storageID = storageID;
+  public String getInfoAddr() {
+    return ipAddr + ":" + infoPort;
  }
 
   /**
-   * @return hostname and no :portNumber.
+   * @return hostname:xferPort
   */
-  public String getHost() {
-    int colon = name.indexOf(":");
-    if (colon < 0) {
-      return name;
-    } else {
-      return name.substring(0, colon);
-    }
+  public String getXferAddrWithHostname() {
+    return hostName + ":" + xferPort;
  }
-  
+
-  public int getPort() {
-    int colon = name.indexOf(":");
-    if (colon < 0) {
-      return 50010; // default port.
-    }
-    return Integer.parseInt(name.substring(colon+1));
+
+  /**
+   * @return data storage ID.
+   */
+  public String getStorageID() {
+    return storageID;
+  }
+
+  /**
+   * @return xferPort (the port for data streaming)
+   */
+  public int getXferPort() {
+    return xferPort;
+  }
+
+  /**
+   * @return infoPort (the port at which the HTTP server bound to)
+   */
+  public int getInfoPort() {
+    return infoPort;
+  }
+
+  /**
+   * @return ipcPort (the port at which the IPC server bound to)
   */
+  public int getIpcPort() {
+    return ipcPort;
  }
 
   public boolean equals(Object to) {
@@ -150,16 +193,16 @@ public class DatanodeID implements WritableComparable<DatanodeID> {
     if (!(to instanceof DatanodeID)) {
       return false;
     }
-    return (name.equals(((DatanodeID)to).getName()) &&
+    return (getXferAddr().equals(((DatanodeID)to).getXferAddr()) &&
             storageID.equals(((DatanodeID)to).getStorageID()));
   }
   
   public int hashCode() {
-    return name.hashCode()^ storageID.hashCode();
+    return getXferAddr().hashCode()^ storageID.hashCode();
   }
   
   public String toString() {
-    return name;
+    return getXferAddr();
   }
   
   /**
@@ -167,39 +210,44 @@ public class DatanodeID implements WritableComparable<DatanodeID> {
   * Note that this does not update storageID.
   */
   public void updateRegInfo(DatanodeID nodeReg) {
-    name = nodeReg.getName();
+    ipAddr = nodeReg.getIpAddr();
+    hostName = nodeReg.getHostName();
+    xferPort = nodeReg.getXferPort();
     infoPort = nodeReg.getInfoPort();
     ipcPort = nodeReg.getIpcPort();
-    // update any more fields added in future.
  }
    
-  /** Comparable.
-   * Basis of compare is the String name (host:portNumber) only.
+  /**
+   * Compare based on data transfer address.
+   *
   * @param that
-   * @return as specified by Comparable.
+   * @return as specified by Comparable
   */
   public int compareTo(DatanodeID that) {
-    return name.compareTo(that.getName());
+    return getXferAddr().compareTo(that.getXferAddr());
  }
 
-  /////////////////////////////////////////////////
-  // Writable
-  /////////////////////////////////////////////////
   @Override
   public void write(DataOutput out) throws IOException {
-    DeprecatedUTF8.writeString(out, name);
-    DeprecatedUTF8.writeString(out, storageID);
+    Text.writeString(out, ipAddr);
+    Text.writeString(out, hostName);
+    Text.writeString(out, storageID);
+    out.writeShort(xferPort);
     out.writeShort(infoPort);
+    out.writeShort(ipcPort);
  }
 
   @Override
   public void readFields(DataInput in) throws IOException {
-    name = DeprecatedUTF8.readString(in);
-    storageID = DeprecatedUTF8.readString(in);
-    // the infoPort read could be negative, if the port is a large number (more
+    ipAddr = Text.readString(in);
+    hostName = Text.readString(in);
+    storageID = Text.readString(in);
+    // The port read could be negative, if the port is a large number (more
    // than 15 bits in storage size (but less than 16 bits).
    // So chop off the first two bytes (and hence the signed bits) before 
    // setting the field.
-    this.infoPort = in.readShort() & 0x0000ffff;
+    xferPort = in.readShort() & 0x0000ffff;
+    infoPort = in.readShort() & 0x0000ffff;
+    ipcPort = in.readShort() & 0x0000ffff;
  }
 }
+ 18 - 38
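Note: the reworked DatanodeID above splits the old overloaded name field into an IP address, a hostname, and three ports, with one accessor per context. A hedged sketch of how the new constructor and helpers fit together; the literal address, storage ID, and ports below are illustrative values only:

    // Hypothetical example values: ip, hostname, storageID, xferPort, infoPort, ipcPort.
    DatanodeID dnId = new DatanodeID("10.0.0.5", "dn1.example.com", "DS-1234", 50010, 50075, 50020);
    dnId.getXferAddr();              // "10.0.0.5:50010" - data streaming address
    dnId.getIpcAddr();               // "10.0.0.5:50020" - IPC server address
    dnId.getInfoAddr();              // "10.0.0.5:50075" - HTTP info server address
    dnId.getXferAddrWithHostname();  // "dn1.example.com:50010"
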
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -51,9 +51,6 @@ public class DatanodeInfo extends DatanodeID implements Node {
   protected long lastUpdate;
   protected long lastUpdate;
   protected int xceiverCount;
   protected int xceiverCount;
   protected String location = NetworkTopology.DEFAULT_RACK;
   protected String location = NetworkTopology.DEFAULT_RACK;
-
-  // The FQDN of the IP associated with the Datanode's hostname
-  protected String hostName = null;
   
   
   // Datanode administrative states
   // Datanode administrative states
   public enum AdminStates {
   public enum AdminStates {
@@ -110,30 +107,27 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.adminState = null;    
     this.adminState = null;    
   }
   }
   
   
-  public DatanodeInfo(DatanodeID nodeID, String location, String hostName) {
+  public DatanodeInfo(DatanodeID nodeID, String location) {
     this(nodeID);
     this(nodeID);
     this.location = location;
     this.location = location;
-    this.hostName = hostName;
   }
   }
   
   
-  public DatanodeInfo(DatanodeID nodeID, String location, String hostName,
+  public DatanodeInfo(DatanodeID nodeID, String location,
       final long capacity, final long dfsUsed, final long remaining,
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
       final AdminStates adminState) {
       final AdminStates adminState) {
-    this(nodeID.getName(), nodeID.getStorageID(), nodeID.getInfoPort(), nodeID
-        .getIpcPort(), capacity, dfsUsed, remaining, blockPoolUsed, lastUpdate,
-        xceiverCount, location, hostName, adminState);
+    this(nodeID.getIpAddr(), nodeID.getHostName(), nodeID.getStorageID(), nodeID.getXferPort(),
+        nodeID.getInfoPort(), nodeID.getIpcPort(), capacity, dfsUsed, remaining,
+        blockPoolUsed, lastUpdate, xceiverCount, location, adminState);
   }
   }
 
 
   /** Constructor */
   /** Constructor */
-  public DatanodeInfo(final String name, final String storageID,
-      final int infoPort, final int ipcPort,
+  public DatanodeInfo(final String name, final String hostName,
+      final String storageID, final int xferPort, final int infoPort, final int ipcPort,
       final long capacity, final long dfsUsed, final long remaining,
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
-      final String networkLocation, final String hostName,
-      final AdminStates adminState) {
-    super(name, storageID, infoPort, ipcPort);
-
+      final String networkLocation, final AdminStates adminState) {
+    super(name, hostName, storageID, xferPort, infoPort, ipcPort);
     this.capacity = capacity;
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
     this.dfsUsed = dfsUsed;
     this.remaining = remaining;
     this.remaining = remaining;
@@ -141,10 +135,14 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.lastUpdate = lastUpdate;
     this.lastUpdate = lastUpdate;
     this.xceiverCount = xceiverCount;
     this.xceiverCount = xceiverCount;
     this.location = networkLocation;
     this.location = networkLocation;
-    this.hostName = hostName;
     this.adminState = adminState;
     this.adminState = adminState;
   }
   }
   
   
+  /** Network location name */
+  public String getName() {
+    return getXferAddr();
+  }
+  
   /** The raw capacity. */
   /** The raw capacity. */
   public long getCapacity() { return capacity; }
   public long getCapacity() { return capacity; }
   
   
@@ -221,15 +219,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   public synchronized void setNetworkLocation(String location) {
   public synchronized void setNetworkLocation(String location) {
     this.location = NodeBase.normalize(location);
     this.location = NodeBase.normalize(location);
   }
   }
-  
-  public String getHostName() {
-    return (hostName == null || hostName.length()==0) ? getHost() : hostName;
-  }
-  
-  public void setHostName(String host) {
-    hostName = host;
-  }
-  
+    
   /** A formatted string for reporting the status of the DataNode. */
   /** A formatted string for reporting the status of the DataNode. */
   public String getDatanodeReport() {
   public String getDatanodeReport() {
     StringBuilder buffer = new StringBuilder();
     StringBuilder buffer = new StringBuilder();
@@ -239,9 +229,9 @@ public class DatanodeInfo extends DatanodeID implements Node {
     long nonDFSUsed = getNonDfsUsed();
     long nonDFSUsed = getNonDfsUsed();
     float usedPercent = getDfsUsedPercent();
     float usedPercent = getDfsUsedPercent();
     float remainingPercent = getRemainingPercent();
     float remainingPercent = getRemainingPercent();
-    String lookupName = NetUtils.getHostNameOfIP(name);
+    String lookupName = NetUtils.getHostNameOfIP(getName());
 
 
-    buffer.append("Name: "+ name);
+    buffer.append("Name: "+ getName());
     if (lookupName != null) {
     if (lookupName != null) {
       buffer.append(" (" + lookupName + ")");
       buffer.append(" (" + lookupName + ")");
     }
     }
@@ -275,7 +265,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     long c = getCapacity();
     long c = getCapacity();
     long r = getRemaining();
     long r = getRemaining();
     long u = getDfsUsed();
     long u = getDfsUsed();
-    buffer.append(name);
+    buffer.append(ipAddr);
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
     if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append(" "+location);
       buffer.append(" "+location);
     }
     }
@@ -380,10 +370,6 @@ public class DatanodeInfo extends DatanodeID implements Node {
   @Override
   @Override
   public void write(DataOutput out) throws IOException {
   public void write(DataOutput out) throws IOException {
     super.write(out);
     super.write(out);
-
-    //TODO: move it to DatanodeID once DatanodeID is not stored in FSImage
-    out.writeShort(ipcPort);
-
     out.writeLong(capacity);
     out.writeLong(capacity);
     out.writeLong(dfsUsed);
     out.writeLong(dfsUsed);
     out.writeLong(remaining);
     out.writeLong(remaining);
@@ -391,17 +377,12 @@ public class DatanodeInfo extends DatanodeID implements Node {
     out.writeLong(lastUpdate);
     out.writeLong(lastUpdate);
     out.writeInt(xceiverCount);
     out.writeInt(xceiverCount);
     Text.writeString(out, location);
     Text.writeString(out, location);
-    Text.writeString(out, hostName == null? "": hostName);
     WritableUtils.writeEnum(out, getAdminState());
     WritableUtils.writeEnum(out, getAdminState());
   }
   }
 
 
   @Override
   @Override
   public void readFields(DataInput in) throws IOException {
   public void readFields(DataInput in) throws IOException {
     super.readFields(in);
     super.readFields(in);
-
-    //TODO: move it to DatanodeID once DatanodeID is not stored in FSImage
-    this.ipcPort = in.readShort() & 0x0000ffff;
-
     this.capacity = in.readLong();
     this.capacity = in.readLong();
     this.dfsUsed = in.readLong();
     this.dfsUsed = in.readLong();
     this.remaining = in.readLong();
     this.remaining = in.readLong();
@@ -409,7 +390,6 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.lastUpdate = in.readLong();
     this.lastUpdate = in.readLong();
     this.xceiverCount = in.readInt();
     this.xceiverCount = in.readInt();
     this.location = Text.readString(in);
     this.location = Text.readString(in);
-    this.hostName = Text.readString(in);
     setAdminState(WritableUtils.readEnum(in, AdminStates.class));
     setAdminState(WritableUtils.readEnum(in, AdminStates.class));
   }
   }
 
 

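With the hostName field and the extra ipcPort serialization gone, DatanodeInfo's report string is built purely from DatanodeID accessors. A minimal sketch of the new "Name:" prefix, assuming only the getName() accessor and NetUtils.getHostNameOfIP() shown above (the helper class name is hypothetical):

    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.net.NetUtils;

    class ReportNameSketch {
      // Mirrors the updated getDatanodeReport() prefix: the display name comes
      // from getName() plus an optional reverse-DNS lookup, not a stored hostName.
      static String nameLine(DatanodeInfo dn) {
        StringBuilder buffer = new StringBuilder();
        String lookupName = NetUtils.getHostNameOfIP(dn.getName()); // may be null
        buffer.append("Name: ").append(dn.getName());
        if (lookupName != null) {
          buffer.append(" (").append(lookupName).append(")");
        }
        return buffer.toString();
      }
    }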
+ 7 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsProtoUtil.java

@@ -84,8 +84,10 @@ public abstract class HdfsProtoUtil {
   private static HdfsProtos.DatanodeIDProto toProto(
       DatanodeID dni) {
     return HdfsProtos.DatanodeIDProto.newBuilder()
-      .setName(dni.getName())
+      .setIpAddr(dni.getIpAddr())
+      .setHostName(dni.getHostName())
       .setStorageID(dni.getStorageID())
+      .setXferPort(dni.getXferPort())
       .setInfoPort(dni.getInfoPort())
       .setIpcPort(dni.getIpcPort())
       .build();
@@ -93,8 +95,10 @@ public abstract class HdfsProtoUtil {
   
   private static DatanodeID fromProto(HdfsProtos.DatanodeIDProto idProto) {
     return new DatanodeID(
-        idProto.getName(),
+        idProto.getIpAddr(),
+        idProto.getHostName(),
         idProto.getStorageID(),
+        idProto.getXferPort(),
         idProto.getInfoPort(),
         idProto.getIpcPort());
   }
@@ -111,7 +115,6 @@ public abstract class HdfsProtoUtil {
       .setLastUpdate(dni.getLastUpdate())
       .setXceiverCount(dni.getXceiverCount())
       .setLocation(dni.getNetworkLocation())
-      .setHostName(dni.getHostName())
       .setAdminState(HdfsProtos.DatanodeInfoProto.AdminState.valueOf(
           dni.getAdminState().name()))
       .build();
@@ -119,7 +122,7 @@ public abstract class HdfsProtoUtil {

   public static DatanodeInfo fromProto(HdfsProtos.DatanodeInfoProto dniProto) {
     DatanodeInfo dniObj = new DatanodeInfo(fromProto(dniProto.getId()),
-        dniProto.getLocation(), dniProto.getHostName());
+        dniProto.getLocation());

     dniObj.setCapacity(dniProto.getCapacity());
     dniObj.setDfsUsed(dniProto.getDfsUsed());

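The converters above now carry the IP address, hostname and transfer port as separate proto fields instead of one combined name string. A rough sketch of rebuilding a DatanodeID from the wire form under those assumptions (the class name is hypothetical and the HdfsProtos import path is assumed):

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;

    class DatanodeIdFromProtoSketch {
      // Rebuild the id from its proto; the six-argument constructor matches the
      // fromProto() hunk above.
      static DatanodeID rebuild(HdfsProtos.DatanodeIDProto p) {
        return new DatanodeID(p.getIpAddr(), p.getHostName(), p.getStorageID(),
            p.getXferPort(), p.getInfoPort(), p.getIpcPort());
      }
    }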
+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java

@@ -45,9 +45,8 @@ public class UnregisteredNodeException extends IOException {
    * @param storedNode data-node stored in the system with this storage id
    */
   public UnregisteredNodeException(DatanodeID nodeID, DatanodeInfo storedNode) {
-    super("Data node " + nodeID.getName() 
-          + " is attempting to report storage ID "
+    super("Data node " + nodeID + " is attempting to report storage ID " 
           + nodeID.getStorageID() + ". Node " 
-          + storedNode.getName() + " is expected to serve this storage.");
+          + storedNode + " is expected to serve this storage.");
   }
 }

+ 2 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java

@@ -97,8 +97,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
    */
   public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid,
       Configuration conf, int socketTimeout) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getHost()
-        + ":" + datanodeid.getIpcPort());
+    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
     rpcProxy = createClientDatanodeProtocolProxy(addr,
         UserGroupInformation.getCurrentUser(), conf,
         NetUtils.getDefaultSocketFactory(conf), socketTimeout);
@@ -107,8 +106,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
   static ClientDatanodeProtocolPB createClientDatanodeProtocolProxy(
       DatanodeID datanodeid, Configuration conf, int socketTimeout,
       LocatedBlock locatedBlock) throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(
-      datanodeid.getHost() + ":" + datanodeid.getIpcPort());
+    InetSocketAddress addr = NetUtils.createSocketAddr(datanodeid.getIpcAddr());
     if (LOG.isDebugEnabled()) {
       LOG.debug("ClientDatanodeProtocol addr=" + addr);
     }

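Both proxy constructors above now obtain the datanode's IPC endpoint from a single accessor rather than concatenating host and port by hand. A small sketch of that pattern (the helper class is hypothetical; getIpcAddr() is assumed to return an ip:ipcPort string, matching the code it replaces):

    import java.net.InetSocketAddress;
    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.net.NetUtils;

    class IpcAddrSketch {
      // Resolve the IPC endpoint of a datanode without manual string building.
      static InetSocketAddress ipcAddress(DatanodeID datanodeid) {
        return NetUtils.createSocketAddr(datanodeid.getIpcAddr());
      }
    }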
+ 11 - 12
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -204,14 +204,18 @@ public class PBHelper {

   // DatanodeId
   public static DatanodeID convert(DatanodeIDProto dn) {
-    return new DatanodeID(dn.getName(), dn.getStorageID(), dn.getInfoPort(),
-        dn.getIpcPort());
+    return new DatanodeID(dn.getIpAddr(), dn.getHostName(), dn.getStorageID(),
+        dn.getXferPort(), dn.getInfoPort(), dn.getIpcPort());
   }

   public static DatanodeIDProto convert(DatanodeID dn) {
-    return DatanodeIDProto.newBuilder().setName(dn.getName())
-        .setInfoPort(dn.getInfoPort()).setIpcPort(dn.getIpcPort())
-        .setStorageID(dn.getStorageID()).build();
+    return DatanodeIDProto.newBuilder()
+        .setIpAddr(dn.getIpAddr())
+        .setHostName(dn.getHostName())
+        .setStorageID(dn.getStorageID())
+        .setXferPort(dn.getXferPort())
+        .setInfoPort(dn.getInfoPort())
+        .setIpcPort(dn.getIpcPort()).build();
   }

   // Arrays of DatanodeId
@@ -442,7 +446,6 @@ public class PBHelper {
     return new DatanodeInfo(
         PBHelper.convert(di.getId()),
         di.hasLocation() ? di.getLocation() : null , 
-        di.hasHostName() ? di.getHostName() : null,
         di.getCapacity(),  di.getDfsUsed(),  di.getRemaining(),
         di.getBlockPoolUsed()  ,  di.getLastUpdate() , di.getXceiverCount() ,
         PBHelper.convert(di.getAdminState())); 
@@ -451,9 +454,6 @@ public class PBHelper {
   static public DatanodeInfoProto convertDatanodeInfo(DatanodeInfo di) {
     if (di == null) return null;
     DatanodeInfoProto.Builder builder = DatanodeInfoProto.newBuilder();
-    if (di.getHostName() != null) {
-      builder.setHostName(di.getHostName());
-    }
     if (di.getNetworkLocation() != null) {
       builder.setLocation(di.getNetworkLocation());
     }
@@ -503,7 +503,6 @@ public class PBHelper {
     builder.setAdminState(PBHelper.convert(info.getAdminState()));
     builder.setCapacity(info.getCapacity())
         .setDfsUsed(info.getDfsUsed())
-        .setHostName(info.getHostName())
         .setId(PBHelper.convert((DatanodeID)info))
         .setLastUpdate(info.getLastUpdate())
         .setLocation(info.getNetworkLocation())
@@ -610,8 +609,8 @@ public class PBHelper {
     DatanodeRegistrationProto.Builder builder = DatanodeRegistrationProto
         .newBuilder();
     return builder.setDatanodeID(PBHelper.convert((DatanodeID) registration))
-        .setStorageInfo(PBHelper.convert(registration.storageInfo))
-        .setKeys(PBHelper.convert(registration.exportedKeys)).build();
+        .setStorageInfo(PBHelper.convert(registration.getStorageInfo()))
+        .setKeys(PBHelper.convert(registration.getExportedKeys())).build();
   }

   public static DatanodeRegistration convert(DatanodeRegistrationProto proto) {

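With every DatanodeID field now carried explicitly in DatanodeIDProto, the two PBHelper.convert overloads should round-trip an id. A test-style sketch under that assumption (class name hypothetical, proto import path assumed):

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.DatanodeIDProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelper;

    class PBHelperRoundTripSketch {
      // Convert to proto and back, then compare the derived transfer address.
      static boolean roundTrips(DatanodeID dn) {
        DatanodeIDProto proto = PBHelper.convert(dn);
        DatanodeID back = PBHelper.convert(proto);
        return dn.getXferAddr().equals(back.getXferAddr())
            && dn.getStorageID().equals(back.getStorageID());
      }
    }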
+ 8 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -304,8 +305,9 @@ public class Balancer {
       DataOutputStream out = null;
       DataInputStream in = null;
       try {
-        sock.connect(NetUtils.createSocketAddr(
-            target.datanode.getName()), HdfsServerConstants.READ_TIMEOUT);
+        sock.connect(
+            NetUtils.createSocketAddr(target.datanode.getXferAddr()),
+            HdfsServerConstants.READ_TIMEOUT);
         sock.setKeepAlive(true);
         out = new DataOutputStream( new BufferedOutputStream(
             sock.getOutputStream(), HdfsConstants.IO_FILE_BUFFER_SIZE));
@@ -586,7 +588,7 @@ public class Balancer {
     /** Add a node task */
     private void addNodeTask(NodeTask task) {
       assert (task.datanode != this) :
-        "Source and target are the same " + datanode.getName();
+        "Source and target are the same " + datanode;
       incScheduledSize(task.getSize());
       nodeTasks.add(task);
     }
@@ -1006,7 +1008,7 @@ public class Balancer {
        targetCandidates.remove();
       }
       LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from "
-          +source.datanode.getName() + " to " + target.datanode.getName());
+          +source.datanode + " to " + target.datanode);
       return true;
     }
     return false;
@@ -1054,7 +1056,7 @@ public class Balancer {
         sourceCandidates.remove();
       }
       LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from "
-          +source.datanode.getName() + " to " + target.datanode.getName());
+          +source.datanode + " to " + target.datanode);
       return true;
     }
     return false;
@@ -1550,7 +1552,7 @@
    */
   public static void main(String[] args) {
     try {
-      System.exit(ToolRunner.run(null, new Cli(), args));
+      System.exit(ToolRunner.run(new HdfsConfiguration(), new Cli(), args));
     } catch (Throwable e) {
       LOG.error("Exiting balancer due an exception", e);
       System.exit(-1);

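The balancer hunk above switches the socket target from the old combined getName() string to the data-transfer address. A sketch of that connection step (helper class hypothetical; the import locations of HdfsServerConstants and the READ_TIMEOUT constant used above are assumed):

    import java.io.IOException;
    import java.net.Socket;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
    import org.apache.hadoop.net.NetUtils;

    class BalancerConnectSketch {
      // Dial the datanode's data-transfer endpoint (getXferAddr()).
      static Socket connectTo(DatanodeInfo datanode) throws IOException {
        Socket sock = new Socket();
        sock.connect(NetUtils.createSocketAddr(datanode.getXferAddr()),
            HdfsServerConstants.READ_TIMEOUT);
        sock.setKeepAlive(true);
        return sock;
      }
    }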
+ 32 - 32
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -808,9 +808,9 @@ public class BlockManager {
    final DatanodeDescriptor node = getDatanodeManager().getDatanode(datanode);
    if (node == null) {
      NameNode.stateChangeLog.warn("BLOCK* getBlocks: "
-          + "Asking for blocks from an unrecorded node " + datanode.getName());
+          + "Asking for blocks from an unrecorded node " + datanode);
      throw new HadoopIllegalArgumentException(
-          "Datanode " + datanode.getName() + " not found.");
+          "Datanode " + datanode + " not found.");
    }

    int numBlocks = node.numBlocks();
@@ -882,7 +882,7 @@ public class BlockManager {
        .hasNext();) {
      DatanodeDescriptor node = it.next();
      invalidateBlocks.add(b, node, false);
-      datanodes.append(node.getName()).append(" ");
+      datanodes.append(node).append(" ");
    }
    if (datanodes.length() != 0) {
      NameNode.stateChangeLog.info("BLOCK* addToInvalidates: "
@@ -921,7 +921,7 @@ public class BlockManager {
    if (node == null) {
      throw new IOException("Cannot mark block " + 
                            storedBlock.getBlockName() +
-                            " as corrupt because datanode " + dn.getName() +
+                            " as corrupt because datanode " + dn +
                            " does not exist. ");
    }

@@ -955,11 +955,11 @@ public class BlockManager {
  private void invalidateBlock(Block blk, DatanodeInfo dn)
      throws IOException {
    NameNode.stateChangeLog.info("BLOCK* invalidateBlock: "
-                                 + blk + " on " + dn.getName());
+                                 + blk + " on " + dn);
    DatanodeDescriptor node = getDatanodeManager().getDatanode(dn);
    if (node == null) {
      throw new IOException("Cannot invalidate block " + blk
-          + " because datanode " + dn.getName() + " does not exist.");
+          + " because datanode " + dn + " does not exist.");
    }

    // Check how many copies we have of the block
@@ -977,11 +977,11 @@ public class BlockManager {
      removeStoredBlock(blk, node);
      if(NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("BLOCK* invalidateBlocks: "
-            + blk + " on " + dn.getName() + " listed for deletion.");
+            + blk + " on " + dn + " listed for deletion.");
      }
    } else {
      NameNode.stateChangeLog.info("BLOCK* invalidateBlocks: " + blk + " on "
-          + dn.getName() + " is the only copy and was not deleted.");
+          + dn + " is the only copy and was not deleted.");
    }
  }

@@ -1224,11 +1224,11 @@ public class BlockManager {
          StringBuilder targetList = new StringBuilder("datanode(s)");
          for (int k = 0; k < targets.length; k++) {
            targetList.append(' ');
-            targetList.append(targets[k].getName());
+            targetList.append(targets[k]);
          }
          NameNode.stateChangeLog.info(
                  "BLOCK* ask "
-                  + rw.srcNode.getName() + " to replicate "
+                  + rw.srcNode + " to replicate "
                  + rw.block + " to " + targetList);
        }
      }
@@ -1410,15 +1410,15 @@ public class BlockManager {
    try {
      final DatanodeDescriptor node = datanodeManager.getDatanode(nodeID);
      if (node == null || !node.isAlive) {
-        throw new IOException("ProcessReport from dead or unregistered node: "
-                              + nodeID.getName());
+        throw new IOException(
+            "ProcessReport from dead or unregistered node: " + nodeID);
      }

      // To minimize startup time, we discard any second (or later) block reports
      // that we receive while still in startup phase.
      if (namesystem.isInStartupSafeMode() && !node.isFirstBlockReport()) {
        NameNode.stateChangeLog.info("BLOCK* processReport: "
-            + "discarded non-initial block report from " + nodeID.getName()
+            + "discarded non-initial block report from " + nodeID
            + " because namenode still in startup phase");
        return;
      }
@@ -1451,7 +1451,7 @@ public class BlockManager {
    // Log the block report processing stats from Namenode perspective
    NameNode.getNameNodeMetrics().addBlockReport((int) (endTime - startTime));
    NameNode.stateChangeLog.info("BLOCK* processReport: from "
-        + nodeID.getName() + ", blocks: " + newReport.getNumberOfBlocks()
+        + nodeID + ", blocks: " + newReport.getNumberOfBlocks()
        + ", processing time: " + (endTime - startTime) + " msecs");
  }

@@ -1511,7 +1511,7 @@ public class BlockManager {
    }
    for (Block b : toInvalidate) {
      NameNode.stateChangeLog.info("BLOCK* processReport: block "
-          + b + " on " + node.getName() + " size " + b.getNumBytes()
+          + b + " on " + node + " size " + b.getNumBytes()
          + " does not belong to any file.");
      addToInvalidates(b, node);
    }
@@ -1662,7 +1662,7 @@ public class BlockManager {
    
    if(LOG.isDebugEnabled()) {
      LOG.debug("Reported block " + block
-          + " on " + dn.getName() + " size " + block.getNumBytes()
+          + " on " + dn + " size " + block.getNumBytes()
          + " replicaState = " + reportedState);
    }
  
@@ -1837,7 +1837,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
          // closed. So, ignore this report, assuming we will get a
          // FINALIZED replica later. See HDFS-2791
          LOG.info("Received an RBW replica for block " + storedBlock +
-              " on " + dn.getName() + ": ignoring it, since the block is " +
+              " on " + dn + ": ignoring it, since the block is " +
              "complete with the same generation stamp.");
          return null;
        } else {
@@ -1850,7 +1850,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
    default:
      String msg = "Unexpected replica state " + reportedState
      + " for block: " + storedBlock + 
-      " on " + dn.getName() + " size " + storedBlock.getNumBytes();
+      " on " + dn + " size " + storedBlock.getNumBytes();
      // log here at WARN level since this is really a broken HDFS
      // invariant
      LOG.warn(msg);
@@ -1949,7 +1949,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
    if (storedBlock == null || storedBlock.getINode() == null) {
      // If this block does not belong to anyfile, then we are done.
      NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
-          + node.getName() + " size " + block.getNumBytes()
+          + node + " size " + block.getNumBytes()
          + " but it does not belong to any file.");
      // we could add this block to invalidate set of this datanode.
      // it will happen in next block report otherwise.
@@ -1972,7 +1972,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
      curReplicaDelta = 0;
      NameNode.stateChangeLog.warn("BLOCK* addStoredBlock: "
          + "Redundant addStoredBlock request received for " + storedBlock
-          + " on " + node.getName() + " size " + storedBlock.getNumBytes());
+          + " on " + node + " size " + storedBlock.getNumBytes());
    }

    // Now check for completion of blocks and safe block count
@@ -2035,7 +2035,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
    
    StringBuilder sb = new StringBuilder(500);
    sb.append("BLOCK* addStoredBlock: blockMap updated: ")
-      .append(node.getName())
+      .append(node)
      .append(" is added to ");
    storedBlock.appendStringTo(sb);
    sb.append(" size " )
@@ -2069,7 +2069,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
      } catch (IOException e) {
        NameNode.stateChangeLog.info("NameNode.invalidateCorruptReplicas " +
                                      "error in deleting bad block " + blk +
-                                      " on " + node + e);
+                                      " on " + node, e);
        gotException = true;
      }
    }
@@ -2335,7 +2335,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
      //
      addToInvalidates(b, cur);
      NameNode.stateChangeLog.info("BLOCK* chooseExcessReplicates: "
-                +"("+cur.getName()+", "+b+") is added to invalidated blocks set.");
+                +"("+cur+", "+b+") is added to invalidated blocks set.");
    }
  }

@@ -2350,7 +2350,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
      excessBlocksCount++;
      if(NameNode.stateChangeLog.isDebugEnabled()) {
        NameNode.stateChangeLog.debug("BLOCK* addToExcessReplicate:"
-            + " (" + dn.getName() + ", " + block
+            + " (" + dn + ", " + block
            + ") is added to excessReplicateMap");
      }
    }
@@ -2363,7 +2363,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
  public void removeStoredBlock(Block block, DatanodeDescriptor node) {
    if(NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("BLOCK* removeStoredBlock: "
-          + block + " from " + node.getName());
+          + block + " from " + node);
    }
    assert (namesystem.hasWriteLock());
    {
@@ -2476,7 +2476,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
    }
    for (Block b : toInvalidate) {
      NameNode.stateChangeLog.info("BLOCK* addBlock: block "
-          + b + " on " + node.getName() + " size " + b.getNumBytes()
+          + b + " on " + node + " size " + b.getNumBytes()
          + " does not belong to any file.");
      addToInvalidates(b, node);
    }
@@ -2504,7 +2504,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
        NameNode.stateChangeLog
            .warn("BLOCK* processIncrementalBlockReport"
                + " is received from dead or unregistered node "
-                + nodeID.getName());
+                + nodeID);
        throw new IOException(
            "Got incremental block report from unregistered or dead node");
      }
@@ -2526,7 +2526,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
          break;
        default:
          String msg = 
-            "Unknown block status code reported by " + nodeID.getName() +
+            "Unknown block status code reported by " + nodeID +
            ": " + rdbi;
          NameNode.stateChangeLog.warn(msg);
          assert false : msg; // if assertions are enabled, throw.
@@ -2535,14 +2535,14 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
        if (NameNode.stateChangeLog.isDebugEnabled()) {
          NameNode.stateChangeLog.debug("BLOCK* block "
              + (rdbi.getStatus()) + ": " + rdbi.getBlock()
-              + " is received from " + nodeID.getName());
+              + " is received from " + nodeID);
        }
      }
    } finally {
      namesystem.writeUnlock();
      NameNode.stateChangeLog
          .debug("*BLOCK* NameNode.processIncrementalBlockReport: " + "from "
-              + nodeID.getName()
+              + nodeID
              +  " receiving: " + receiving + ", "
              + " received: " + received + ", "
              + " deleted: " + deleted);
@@ -2618,7 +2618,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
    StringBuilder nodeList = new StringBuilder();
    while (nodeIter.hasNext()) {
      DatanodeDescriptor node = nodeIter.next();
-      nodeList.append(node.name);
+      nodeList.append(node);
      nodeList.append(" ");
    }
    LOG.info("Block: " + block + ", Expected Replicas: "
@@ -2628,7 +2628,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
        + ", excess replicas: " + num.excessReplicas()
        + ", Is Open File: " + fileINode.isUnderConstruction()
        + ", Datanodes having this block: " + nodeList + ", Current Datanode: "
-        + srcNode.name + ", Is current datanode decommissioning: "
+        + srcNode + ", Is current datanode decommissioning: "
        + srcNode.isDecommissionInProgress());
  }
  

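The recurring change in the BlockManager hunks is that log and exception messages append the node object itself, relying on DatanodeID's toString() instead of getName(). A one-method sketch of the resulting call-site pattern (class name hypothetical; the exact toString() format is whatever DatanodeID defines):

    import org.apache.hadoop.hdfs.protocol.DatanodeID;

    class NodeLoggingSketch {
      // String concatenation invokes DatanodeID.toString() implicitly, so call
      // sites no longer pick a particular accessor for display purposes.
      static String deadNodeMessage(DatanodeID nodeID) {
        return "ProcessReport from dead or unregistered node: " + nodeID;
      }
    }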
+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CorruptReplicasMap.java

@@ -65,14 +65,14 @@ public class CorruptReplicasMap{
      nodes.add(dn);
      NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                   blk.getBlockName() +
-                                   " added as corrupt on " + dn.getName() +
+                                   " added as corrupt on " + dn +
                                   " by " + Server.getRemoteIp() +
                                   reasonText);
    } else {
      NameNode.stateChangeLog.info("BLOCK NameSystem.addToCorruptReplicasMap: "+
                                   "duplicate requested for " + 
                                   blk.getBlockName() + " to add as corrupt " +
-                                   "on " + dn.getName() +
+                                   "on " + dn +
                                   " by " + Server.getRemoteIp() +
                                   reasonText);
    }

+ 2 - 32
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -175,19 +175,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   */
  public DatanodeDescriptor(DatanodeID nodeID, 
                            String networkLocation) {
-    this(nodeID, networkLocation, null);
-  }
-  
-  /** DatanodeDescriptor constructor
-   * 
-   * @param nodeID id of the data node
-   * @param networkLocation location of the data node in network
-   * @param hostName it could be different from host specified for DatanodeID
-   */
-  public DatanodeDescriptor(DatanodeID nodeID, 
-                            String networkLocation,
-                            String hostName) {
-    this(nodeID, networkLocation, hostName, 0L, 0L, 0L, 0L, 0, 0);
+    this(nodeID, networkLocation, 0L, 0L, 0L, 0L, 0, 0);
  }
  
  /** DatanodeDescriptor constructor
@@ -223,14 +211,13 @@ public class DatanodeDescriptor extends DatanodeInfo {
   */
  public DatanodeDescriptor(DatanodeID nodeID,
                            String networkLocation,
-                            String hostName,
                            long capacity,
                            long dfsUsed,
                            long remaining,
                            long bpused,
                            int xceiverCount,
                            int failedVolumes) {
-    super(nodeID, networkLocation, hostName);
+    super(nodeID, networkLocation);
    updateHeartbeat(capacity, dfsUsed, remaining, bpused, xceiverCount, 
        failedVolumes);
  }
@@ -436,23 +423,6 @@ public class DatanodeDescriptor extends DatanodeInfo {
    }
  }

-  /** Serialization for FSEditLog */
-  public void readFieldsFromFSEditLog(DataInput in) throws IOException {
-    this.name = DeprecatedUTF8.readString(in);
-    this.storageID = DeprecatedUTF8.readString(in);
-    this.infoPort = in.readShort() & 0x0000ffff;
-
-    this.capacity = in.readLong();
-    this.dfsUsed = in.readLong();
-    this.remaining = in.readLong();
-    this.blockPoolUsed = in.readLong();
-    this.lastUpdate = in.readLong();
-    this.xceiverCount = in.readInt();
-    this.location = Text.readString(in);
-    this.hostName = Text.readString(in);
-    setAdminState(WritableUtils.readEnum(in, AdminStates.class));
-  }
-  
  /**
   * @return Approximate number of blocks currently scheduled to be written 
   * to this datanode.

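After the constructor cleanup above, a DatanodeDescriptor is created from a DatanodeID and a rack only; the hostname travels inside the id. A minimal sketch (helper class hypothetical):

    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
    import org.apache.hadoop.net.NetworkTopology;

    class DescriptorConstructionSketch {
      // Two-argument constructor; usage counters stay at zero until the first
      // heartbeat updates them.
      static DatanodeDescriptor forNewNode(DatanodeID nodeID) {
        return new DatanodeDescriptor(nodeID, NetworkTopology.DEFAULT_RACK);
      }
    }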
+ 66 - 79
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -238,7 +238,7 @@ public class DatanodeManager {
     final DatanodeDescriptor node = getDatanode(nodeID.getStorageID());
     final DatanodeDescriptor node = getDatanode(nodeID.getStorageID());
     if (node == null) 
     if (node == null) 
       return null;
       return null;
-    if (!node.getName().equals(nodeID.getName())) {
+    if (!node.getXferAddr().equals(nodeID.getXferAddr())) {
       final UnregisteredNodeException e = new UnregisteredNodeException(
       final UnregisteredNodeException e = new UnregisteredNodeException(
           nodeID, node);
           nodeID, node);
       NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
       NameNode.stateChangeLog.fatal("BLOCK* NameSystem.getDatanode: "
@@ -270,7 +270,7 @@ public class DatanodeManager {
     networktopology.remove(nodeInfo);
     networktopology.remove(nodeInfo);
 
 
     if (LOG.isDebugEnabled()) {
     if (LOG.isDebugEnabled()) {
-      LOG.debug("remove datanode " + nodeInfo.getName());
+      LOG.debug("remove datanode " + nodeInfo);
     }
     }
     namesystem.checkSafeMode();
     namesystem.checkSafeMode();
   }
   }
@@ -288,7 +288,7 @@ public class DatanodeManager {
         removeDatanode(descriptor);
         removeDatanode(descriptor);
       } else {
       } else {
         NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
         NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
-                                     + node.getName() + " does not exist");
+                                     + node + " does not exist");
       }
       }
     } finally {
     } finally {
       namesystem.writeUnlock();
       namesystem.writeUnlock();
@@ -306,7 +306,7 @@ public class DatanodeManager {
         }
         }
         if (d != null && isDatanodeDead(d)) {
         if (d != null && isDatanodeDead(d)) {
           NameNode.stateChangeLog.info(
           NameNode.stateChangeLog.info(
-              "BLOCK* removeDeadDatanode: lost heartbeat from " + d.getName());
+              "BLOCK* removeDeadDatanode: lost heartbeat from " + d);
           removeDatanode(d);
           removeDatanode(d);
         }
         }
       }
       }
@@ -332,19 +332,19 @@ public class DatanodeManager {
 
 
     if (LOG.isDebugEnabled()) {
     if (LOG.isDebugEnabled()) {
       LOG.debug(getClass().getSimpleName() + ".addDatanode: "
       LOG.debug(getClass().getSimpleName() + ".addDatanode: "
-          + "node " + node.getName() + " is added to datanodeMap.");
+          + "node " + node + " is added to datanodeMap.");
     }
     }
   }
   }
 
 
   /** Physically remove node from datanodeMap. */
   /** Physically remove node from datanodeMap. */
-  private void wipeDatanode(final DatanodeID node) throws IOException {
+  private void wipeDatanode(final DatanodeID node) {
     final String key = node.getStorageID();
     final String key = node.getStorageID();
     synchronized (datanodeMap) {
     synchronized (datanodeMap) {
       host2DatanodeMap.remove(datanodeMap.remove(key));
       host2DatanodeMap.remove(datanodeMap.remove(key));
     }
     }
     if (LOG.isDebugEnabled()) {
     if (LOG.isDebugEnabled()) {
       LOG.debug(getClass().getSimpleName() + ".wipeDatanode("
       LOG.debug(getClass().getSimpleName() + ".wipeDatanode("
-          + node.getName() + "): storage " + key 
+          + node + "): storage " + key 
           + " is removed from datanodeMap.");
           + " is removed from datanodeMap.");
     }
     }
   }
   }
@@ -354,7 +354,7 @@ public class DatanodeManager {
     List<String> names = new ArrayList<String>(1);
     List<String> names = new ArrayList<String>(1);
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
     if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
       // get the node's IP address
       // get the node's IP address
-      names.add(node.getHost());
+      names.add(node.getIpAddr());
     } else {
     } else {
       // get the node's host name
       // get the node's host name
       String hostName = node.getHostName();
       String hostName = node.getHostName();
@@ -376,12 +376,12 @@ public class DatanodeManager {
     node.setNetworkLocation(networkLocation);
     node.setNetworkLocation(networkLocation);
   }
   }
 
 
-  private boolean inHostsList(DatanodeID node, String ipAddr) {
-     return checkInList(node, ipAddr, hostsReader.getHosts(), false);
+  private boolean inHostsList(DatanodeID node) {
+     return checkInList(node, hostsReader.getHosts(), false);
   }
   }
   
   
-  private boolean inExcludedHostsList(DatanodeID node, String ipAddr) {
-    return checkInList(node, ipAddr, hostsReader.getExcludedHosts(), true);
+  private boolean inExcludedHostsList(DatanodeID node) {
+    return checkInList(node, hostsReader.getExcludedHosts(), true);
   }
   }
 
 
   /**
   /**
@@ -419,7 +419,7 @@ public class DatanodeManager {
     
     
     for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
     for (Iterator<DatanodeDescriptor> it = nodeList.iterator(); it.hasNext();) {
       DatanodeDescriptor node = it.next();
       DatanodeDescriptor node = it.next();
-      if ((!inHostsList(node, null)) && (!inExcludedHostsList(node, null))
+      if ((!inHostsList(node)) && (!inExcludedHostsList(node))
           && node.isDecommissioned()) {
           && node.isDecommissioned()) {
         // Include list is not empty, an existing datanode does not appear
         // Include list is not empty, an existing datanode does not appear
         // in both include or exclude lists and it has been decommissioned.
         // in both include or exclude lists and it has been decommissioned.
@@ -430,37 +430,23 @@ public class DatanodeManager {
   }
   }
 
 
   /**
   /**
-   * Check if the given node (of DatanodeID or ipAddress) is in the (include or
-   * exclude) list.  If ipAddress in null, check only based upon the given 
-   * DatanodeID.  If ipAddress is not null, the ipAddress should refers to the
-   * same host that given DatanodeID refers to.
+   * Check if the given DatanodeID is in the given (include or exclude) list.
    * 
    * 
-   * @param node, the host DatanodeID
-   * @param ipAddress, if not null, should refers to the same host
-   *                   that DatanodeID refers to
-   * @param hostsList, the list of hosts in the include/exclude file
-   * @param isExcludeList, boolean, true if this is the exclude list
-   * @return boolean, if in the list
+   * @param node the DatanodeID to check
+   * @param hostsList the list of hosts in the include/exclude file
+   * @param isExcludeList true if this is the exclude list
+   * @return true if the node is in the list, false otherwise
    */
    */
   private static boolean checkInList(final DatanodeID node,
   private static boolean checkInList(final DatanodeID node,
-      final String ipAddress,
       final Set<String> hostsList,
       final Set<String> hostsList,
       final boolean isExcludeList) {
       final boolean isExcludeList) {
     final InetAddress iaddr;
     final InetAddress iaddr;
-    if (ipAddress != null) {
-      try {
-        iaddr = InetAddress.getByName(ipAddress);
-      } catch (UnknownHostException e) {
-        LOG.warn("Unknown ip address: " + ipAddress, e);
-        return isExcludeList;
-      }
-    } else {
-      try {
-        iaddr = InetAddress.getByName(node.getHost());
-      } catch (UnknownHostException e) {
-        LOG.warn("Unknown host: " + node.getHost(), e);
-        return isExcludeList;
-      }
+
+    try {
+      iaddr = InetAddress.getByName(node.getIpAddr());
+    } catch (UnknownHostException e) {
+      LOG.warn("Unknown IP: " + node.getIpAddr(), e);
+      return isExcludeList;
     }
     }
 
 
     // if include list is empty, host is in include list
     // if include list is empty, host is in include list
@@ -470,10 +456,10 @@ public class DatanodeManager {
     return // compare ipaddress(:port)
     return // compare ipaddress(:port)
     (hostsList.contains(iaddr.getHostAddress().toString()))
     (hostsList.contains(iaddr.getHostAddress().toString()))
         || (hostsList.contains(iaddr.getHostAddress().toString() + ":"
         || (hostsList.contains(iaddr.getHostAddress().toString() + ":"
-            + node.getPort()))
+            + node.getXferPort()))
         // compare hostname(:port)
         // compare hostname(:port)
         || (hostsList.contains(iaddr.getHostName()))
         || (hostsList.contains(iaddr.getHostName()))
-        || (hostsList.contains(iaddr.getHostName() + ":" + node.getPort()))
+        || (hostsList.contains(iaddr.getHostName() + ":" + node.getXferPort()))
         || ((node instanceof DatanodeInfo) && hostsList
         || ((node instanceof DatanodeInfo) && hostsList
             .contains(((DatanodeInfo) node).getHostName()));
             .contains(((DatanodeInfo) node).getHostName()));
   }
   }
@@ -481,10 +467,9 @@ public class DatanodeManager {
   /**
   /**
    * Decommission the node if it is in exclude list.
    * Decommission the node if it is in exclude list.
    */
    */
-  private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr) 
-    throws IOException {
+  private void checkDecommissioning(DatanodeDescriptor nodeReg, String ipAddr) { 
     // If the registered node is in exclude list, then decommission it
     // If the registered node is in exclude list, then decommission it
-    if (inExcludedHostsList(nodeReg, ipAddr)) {
+    if (inExcludedHostsList(nodeReg)) {
       startDecommission(nodeReg);
       startDecommission(nodeReg);
     }
     }
   }
   }
@@ -499,16 +484,16 @@ public class DatanodeManager {
     if (node.isDecommissionInProgress()) {
     if (node.isDecommissionInProgress()) {
       if (!blockManager.isReplicationInProgress(node)) {
       if (!blockManager.isReplicationInProgress(node)) {
         node.setDecommissioned();
         node.setDecommissioned();
-        LOG.info("Decommission complete for node " + node.getName());
+        LOG.info("Decommission complete for node " + node);
       }
       }
     }
     }
     return node.isDecommissioned();
     return node.isDecommissioned();
   }
   }
 
 
   /** Start decommissioning the specified datanode. */
   /** Start decommissioning the specified datanode. */
-  private void startDecommission(DatanodeDescriptor node) throws IOException {
+  private void startDecommission(DatanodeDescriptor node) {
     if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
     if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
-      LOG.info("Start Decommissioning node " + node.getName() + " with " + 
+      LOG.info("Start Decommissioning node " + node + " with " + 
           node.numBlocks() +  " blocks.");
           node.numBlocks() +  " blocks.");
       heartbeatManager.startDecommission(node);
       heartbeatManager.startDecommission(node);
       node.decommissioningStatus.setStartTime(now());
       node.decommissioningStatus.setStartTime(now());
@@ -519,9 +504,9 @@ public class DatanodeManager {
   }
   }
 
 
   /** Stop decommissioning the specified datanodes. */
   /** Stop decommissioning the specified datanodes. */
-  void stopDecommission(DatanodeDescriptor node) throws IOException {
+  void stopDecommission(DatanodeDescriptor node) {
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
-      LOG.info("Stop Decommissioning node " + node.getName());
+      LOG.info("Stop Decommissioning node " + node);
       heartbeatManager.stopDecommission(node);
       heartbeatManager.stopDecommission(node);
       blockManager.processOverReplicatedBlocksOnReCommission(node);
       blockManager.processOverReplicatedBlocksOnReCommission(node);
     }
     }
@@ -545,41 +530,44 @@ public class DatanodeManager {
     return newID;
     return newID;
   }
   }
 
 
-  public void registerDatanode(DatanodeRegistration nodeReg
-      ) throws IOException {
+  /**
+   * Register the given datanode with the namenode. NB: the given
+   * registration is mutated and given back to the datanode.
+   *
+   * @param nodeReg the datanode registration
+   * @throws DisallowedDatanodeException if the registration request is
+   *    denied because the datanode does not match includes/excludes
+   */
+  public void registerDatanode(DatanodeRegistration nodeReg)
+      throws DisallowedDatanodeException {
     String dnAddress = Server.getRemoteAddress();
     String dnAddress = Server.getRemoteAddress();
     if (dnAddress == null) {
     if (dnAddress == null) {
       // Mostly called inside an RPC.
       // Mostly called inside an RPC.
       // But if not, use address passed by the data-node.
       // But if not, use address passed by the data-node.
-      dnAddress = nodeReg.getHost();
-    }      
+      dnAddress = nodeReg.getIpAddr();
+    }
+
+    // Update the IP to the address of the RPC request that is
+    // registering this datanode.
+    nodeReg.setIpAddr(dnAddress);
+    nodeReg.setExportedKeys(blockManager.getBlockKeys());
 
 
     // Checks if the node is not on the hosts list.  If it is not, then
     // Checks if the node is not on the hosts list.  If it is not, then
     // it will be disallowed from registering. 
     // it will be disallowed from registering. 
-    if (!inHostsList(nodeReg, dnAddress)) {
+    if (!inHostsList(nodeReg)) {
       throw new DisallowedDatanodeException(nodeReg);
       throw new DisallowedDatanodeException(nodeReg);
     }
     }
-
-    String hostName = nodeReg.getHost();
-      
-    // update the datanode's name with ip:port
-    DatanodeID dnReg = new DatanodeID(dnAddress + ":" + nodeReg.getPort(),
-                                      nodeReg.getStorageID(),
-                                      nodeReg.getInfoPort(),
-                                      nodeReg.getIpcPort());
-    nodeReg.updateRegInfo(dnReg);
-    nodeReg.exportedKeys = blockManager.getBlockKeys();
       
       
     NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: "
     NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: "
-        + "node registration from " + nodeReg.getName()
+        + "node registration from " + nodeReg
         + " storage " + nodeReg.getStorageID());
         + " storage " + nodeReg.getStorageID());
 
 
     DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
     DatanodeDescriptor nodeS = datanodeMap.get(nodeReg.getStorageID());
-    DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getName());
+    DatanodeDescriptor nodeN = getDatanodeByHost(nodeReg.getXferAddr());
       
       
     if (nodeN != null && nodeN != nodeS) {
     if (nodeN != null && nodeN != nodeS) {
       NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
       NameNode.LOG.info("BLOCK* NameSystem.registerDatanode: "
-                        + "node from name: " + nodeN.getName());
+                        + "node from name: " + nodeN);
       // nodeN previously served a different data storage, 
       // nodeN previously served a different data storage, 
       // which is not served by anybody anymore.
       // which is not served by anybody anymore.
       removeDatanode(nodeN);
       removeDatanode(nodeN);
@@ -608,15 +596,14 @@ public class DatanodeManager {
           but this is might not work if VERSION file format has changed 
           but this is might not work if VERSION file format has changed 
        */        
        */        
         NameNode.stateChangeLog.info( "BLOCK* NameSystem.registerDatanode: "
         NameNode.stateChangeLog.info( "BLOCK* NameSystem.registerDatanode: "
-                                      + "node " + nodeS.getName()
-                                      + " is replaced by " + nodeReg.getName() + 
+                                      + "node " + nodeS
+                                      + " is replaced by " + nodeReg + 
                                       " with the same storageID " +
                                       " with the same storageID " +
                                       nodeReg.getStorageID());
                                       nodeReg.getStorageID());
       }
       }
       // update cluster map
       // update cluster map
       getNetworkTopology().remove(nodeS);
       getNetworkTopology().remove(nodeS);
       nodeS.updateRegInfo(nodeReg);
       nodeS.updateRegInfo(nodeReg);
-      nodeS.setHostName(hostName);
       nodeS.setDisallowed(false); // Node is in the include list
       nodeS.setDisallowed(false); // Node is in the include list
       
       
       // resolve network location
       // resolve network location
@@ -630,11 +617,11 @@ public class DatanodeManager {
     } 
     } 
 
 
     // this is a new datanode serving a new data storage
     // this is a new datanode serving a new data storage
-    if (nodeReg.getStorageID().equals("")) {
+    if ("".equals(nodeReg.getStorageID())) {
       // this data storage has never been registered
       // this data storage has never been registered
       // it is either empty or was created by pre-storageID version of DFS
       // it is either empty or was created by pre-storageID version of DFS
-      nodeReg.storageID = newStorageID();
-      if(NameNode.stateChangeLog.isDebugEnabled()) {
+      nodeReg.setStorageID(newStorageID());
+      if (NameNode.stateChangeLog.isDebugEnabled()) {
         NameNode.stateChangeLog.debug(
         NameNode.stateChangeLog.debug(
             "BLOCK* NameSystem.registerDatanode: "
             "BLOCK* NameSystem.registerDatanode: "
             + "new storageID " + nodeReg.getStorageID() + " assigned.");
             + "new storageID " + nodeReg.getStorageID() + " assigned.");
@@ -642,7 +629,7 @@ public class DatanodeManager {
     }
     }
     // register new datanode
     // register new datanode
     DatanodeDescriptor nodeDescr 
     DatanodeDescriptor nodeDescr 
-      = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK, hostName);
+      = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK);
     resolveNetworkLocation(nodeDescr);
     resolveNetworkLocation(nodeDescr);
     addDatanode(nodeDescr);
     addDatanode(nodeDescr);
     checkDecommissioning(nodeDescr, dnAddress);
     checkDecommissioning(nodeDescr, dnAddress);
@@ -690,10 +677,10 @@ public class DatanodeManager {
   private void refreshDatanodes() throws IOException {
   private void refreshDatanodes() throws IOException {
     for(DatanodeDescriptor node : datanodeMap.values()) {
     for(DatanodeDescriptor node : datanodeMap.values()) {
       // Check if not include.
-      if (!inHostsList(node, null)) {
+      if (!inHostsList(node)) {
         node.setDisallowed(true); // case 2.
       } else {
-        if (inExcludedHostsList(node, null)) {
+        if (inExcludedHostsList(node)) {
           startDecommission(node); // case 3.
         } else {
           stopDecommission(node); // case 4.
@@ -820,16 +807,16 @@ public class DatanodeManager {
         }
         //Remove any form of the this datanode in include/exclude lists.
         try {
-          InetAddress inet = InetAddress.getByName(dn.getHost());
+          InetAddress inet = InetAddress.getByName(dn.getIpAddr());
           // compare hostname(:port)
           mustList.remove(inet.getHostName());
-          mustList.remove(inet.getHostName()+":"+dn.getPort());
+          mustList.remove(inet.getHostName()+":"+dn.getXferPort());
           // compare ipaddress(:port)
           mustList.remove(inet.getHostAddress().toString());
-          mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getPort());
+          mustList.remove(inet.getHostAddress().toString()+ ":" +dn.getXferPort());
         } catch ( UnknownHostException e ) {
           mustList.remove(dn.getName());
-          mustList.remove(dn.getHost());
+          mustList.remove(dn.getIpAddr());
           LOG.warn(e);
         }
       }

+ 15 - 50
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java

@@ -39,10 +39,10 @@ class Host2NodesMap {
       return false;
     }
       
       
-    String host = node.getHost();
+    String ipAddr = node.getIpAddr();
     hostmapLock.readLock().lock();
     try {
-      DatanodeDescriptor[] nodes = map.get(host);
+      DatanodeDescriptor[] nodes = map.get(ipAddr);
       if (nodes != null) {
         for(DatanodeDescriptor containedNode:nodes) {
           if (node==containedNode) {
@@ -66,8 +66,8 @@ class Host2NodesMap {
         return false;
       }
       
       
-      String host = node.getHost();
-      DatanodeDescriptor[] nodes = map.get(host);
+      String ipAddr = node.getIpAddr();
+      DatanodeDescriptor[] nodes = map.get(ipAddr);
       DatanodeDescriptor[] newNodes;
       if (nodes==null) {
         newNodes = new DatanodeDescriptor[1];
@@ -77,7 +77,7 @@ class Host2NodesMap {
         System.arraycopy(nodes, 0, newNodes, 0, nodes.length);
         newNodes[nodes.length] = node;
       }
-      map.put(host, newNodes);
+      map.put(ipAddr, newNodes);
       return true;
     } finally {
       hostmapLock.writeLock().unlock();
@@ -92,17 +92,17 @@ class Host2NodesMap {
       return false;
     }
       
       
-    String host = node.getHost();
+    String ipAddr = node.getIpAddr();
     hostmapLock.writeLock().lock();
     try {
 
 
-      DatanodeDescriptor[] nodes = map.get(host);
+      DatanodeDescriptor[] nodes = map.get(ipAddr);
       if (nodes==null) {
         return false;
       }
       if (nodes.length==1) {
         if (nodes[0]==node) {
-          map.remove(host);
+          map.remove(ipAddr);
           return true;
         } else {
           return false;
@@ -122,7 +122,7 @@ class Host2NodesMap {
         newNodes = new DatanodeDescriptor[nodes.length-1];
         System.arraycopy(nodes, 0, newNodes, 0, i);
         System.arraycopy(nodes, i+1, newNodes, i, nodes.length-i-1);
-        map.put(host, newNodes);
+        map.put(ipAddr, newNodes);
         return true;
       }
     } finally {
@@ -130,17 +130,18 @@ class Host2NodesMap {
     }
   }
     
     
-  /** get a data node by its host.
-   * @return DatanodeDescriptor if found; otherwise null.
+  /**
+   * Get a data node by its IP address.
+   * @return DatanodeDescriptor if found, null otherwise 
    */
-  DatanodeDescriptor getDatanodeByHost(String host) {
-    if (host==null) {
+  DatanodeDescriptor getDatanodeByHost(String ipAddr) {
+    if (ipAddr == null) {
       return null;
     }
       
       
     hostmapLock.readLock().lock();
     try {
-      DatanodeDescriptor[] nodes = map.get(host);
+      DatanodeDescriptor[] nodes = map.get(ipAddr);
       // no entry
       if (nodes== null) {
         return null;
@@ -155,40 +156,4 @@ class Host2NodesMap {
       hostmapLock.readLock().unlock();
     }
   }
-    
-  /**
-   * Find data node by its name.
-   * 
-   * @return DatanodeDescriptor if found or null otherwise 
-   */
-  public DatanodeDescriptor getDatanodeByName(String name) {
-    if (name==null) {
-      return null;
-    }
-      
-    int colon = name.indexOf(":");
-    String host;
-    if (colon < 0) {
-      host = name;
-    } else {
-      host = name.substring(0, colon);
-    }
-
-    hostmapLock.readLock().lock();
-    try {
-      DatanodeDescriptor[] nodes = map.get(host);
-      // no entry
-      if (nodes== null) {
-        return null;
-      }
-      for(DatanodeDescriptor containedNode:nodes) {
-        if (name.equals(containedNode.getName())) {
-          return containedNode;
-        }
-      }
-      return null;
-    } finally {
-      hostmapLock.readLock().unlock();
-    }
-  }
 }

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java

@@ -75,7 +75,7 @@ class InvalidateBlocks {
       numBlocks++;
       if (log) {
         NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
-            + ": add " + block + " to " + datanode.getName());
+            + ": add " + block + " to " + datanode);
       }
     }
   }
@@ -111,7 +111,8 @@ class InvalidateBlocks {
     for(Map.Entry<String,LightWeightHashSet<Block>> entry : node2blocks.entrySet()) {
       final LightWeightHashSet<Block> blocks = entry.getValue();
       if (blocks.size() > 0) {
-        out.println(datanodeManager.getDatanode(entry.getKey()).getName() + blocks);
+        out.println(datanodeManager.getDatanode(entry.getKey()));
+        out.println(blocks);
       }
     }
   }
@@ -135,7 +136,7 @@ class InvalidateBlocks {
 
 
     if (NameNode.stateChangeLog.isInfoEnabled()) {
       NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
-          + ": ask " + dn.getName() + " to delete " + toInvalidate);
+          + ": ask " + dn + " to delete " + toInvalidate);
     }
     return toInvalidate.size();
   }

+ 1 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java

@@ -88,9 +88,6 @@ public class JspHelper {
   private static class NodeRecord extends DatanodeInfo {
     int frequency;
 
 
-    public NodeRecord() {
-      frequency = -1;
-    }
     public NodeRecord(DatanodeInfo info, int count) {
       super(info);
       this.frequency = count;
@@ -172,7 +169,7 @@ public class JspHelper {
 
 
       //just ping to check whether the node is alive
       InetSocketAddress targetAddr = NetUtils.createSocketAddr(
-          chosenNode.getHost() + ":" + chosenNode.getInfoPort());
+          chosenNode.getInfoAddr());
         
         
       try {
         s = NetUtils.getDefaultSocketFactory(conf).createSocket();

+ 3 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -64,18 +64,12 @@ import org.apache.hadoop.util.VersionInfo;
 public abstract class Storage extends StorageInfo {
   public static final Log LOG = LogFactory.getLog(Storage.class.getName());
 
 
-  // Constants
-  
   // last layout version that did not support upgrades
   public static final int LAST_PRE_UPGRADE_LAYOUT_VERSION = -3;
   
   
-  // this corresponds to Hadoop-0.14.
-  public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -7;
-  protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.14";
-
-  /* this should be removed when LAST_UPGRADABLE_LV goes beyond -13.
-   * any upgrade code that uses this constant should also be removed. */
-  public static final int PRE_GENERATIONSTAMP_LAYOUT_VERSION = -13;
+  // this corresponds to Hadoop-0.18
+  public static final int LAST_UPGRADABLE_LAYOUT_VERSION = -16;
+  protected static final String LAST_UPGRADABLE_HADOOP_VERSION = "Hadoop-0.18";
   
   
   /** Layout versions of 0.20.203 release */
   public static final int[] LAYOUT_VERSIONS_203 = {-19, -31};

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java

@@ -325,10 +325,10 @@ class BPOfferService {
   void registrationSucceeded(BPServiceActor bpServiceActor,
       DatanodeRegistration reg) throws IOException {
     if (bpRegistration != null) {
-      checkNSEquality(bpRegistration.storageInfo.getNamespaceID(),
-          reg.storageInfo.getNamespaceID(), "namespace ID");
-      checkNSEquality(bpRegistration.storageInfo.getClusterID(),
-          reg.storageInfo.getClusterID(), "cluster ID");
+      checkNSEquality(bpRegistration.getStorageInfo().getNamespaceID(),
+          reg.getStorageInfo().getNamespaceID(), "namespace ID");
+      checkNSEquality(bpRegistration.getStorageInfo().getClusterID(),
+          reg.getStorageInfo().getClusterID(), "cluster ID");
     } else {
       bpRegistration = reg;
     }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java

@@ -602,7 +602,7 @@ class BPServiceActor implements Runnable {
 
 
     while (shouldRun()) {
       try {
-        // Use returned registration from namenode with updated machine name.
+        // Use returned registration from namenode with updated fields
         bpRegistration = bpNamenode.registerDatanode(bpRegistration);
         break;
       } catch(SocketTimeoutException e) {  // namenode is busy

+ 91 - 98
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -164,9 +164,9 @@ import org.apache.hadoop.util.VersionInfo;
 import org.mortbay.util.ajax.JSON;
 
 
 import com.google.common.base.Preconditions;
+import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;
 
 
-
 /**********************************************************
  * DataNode is a class (and program) that stores a set of
  * blocks for a DFS deployment.  A single deployment can
@@ -244,9 +244,10 @@ public class DataNode extends Configured
   private DataStorage storage = null;
   private HttpServer infoServer = null;
   DataNodeMetrics metrics;
-  private InetSocketAddress selfAddr;
+  private InetSocketAddress streamingAddr;
   
   
-  private volatile String hostName; // Host name of this datanode
+  private String hostName;
+  private DatanodeID id;
   
   
   boolean isBlockTokenEnabled;
   BlockPoolTokenSecretManager blockPoolTokenSecretManager;
@@ -288,6 +289,7 @@ public class DataNode extends Configured
         .get(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY);
     try {
       hostName = getHostName(conf);
+      LOG.info("Configured hostname is " + hostName);
       startDataNode(conf, dataDirs, resources);
     } catch (IOException ie) {
       shutdown();
@@ -305,16 +307,25 @@ public class DataNode extends Configured
     clusterId = nsCid;
   }
 
 
+  /**
+   * Returns the hostname for this datanode. If the hostname is not
+   * explicitly configured in the given config, then it is determined
+   * via the DNS class.
+   *
+   * @param config
+   * @return the hostname (NB: may not be a FQDN)
+   * @throws UnknownHostException if the dfs.datanode.dns.interface
+   *    option is used and the hostname can not be determined
+   */
   private static String getHostName(Configuration config)
       throws UnknownHostException {
-    // use configured nameserver & interface to get local hostname
     String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
     if (name == null) {
-      name = DNS
-          .getDefaultHost(config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
-              DFS_DATANODE_DNS_INTERFACE_DEFAULT), config.get(
-              DFS_DATANODE_DNS_NAMESERVER_KEY,
-              DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
+      name = DNS.getDefaultHost(
+          config.get(DFS_DATANODE_DNS_INTERFACE_KEY,
+                     DFS_DATANODE_DNS_INTERFACE_DEFAULT),
+          config.get(DFS_DATANODE_DNS_NAMESERVER_KEY,
+                     DFS_DATANODE_DNS_NAMESERVER_DEFAULT));
     }
     return name;
   }
@@ -485,23 +496,22 @@ public class DataNode extends Configured
   }
   
   
   private void initDataXceiver(Configuration conf) throws IOException {
-    InetSocketAddress streamingAddr = DataNode.getStreamingAddr(conf);
-
     // find free port or use privileged port provided
     ServerSocket ss;
-    if(secureResources == null) {
+    if (secureResources == null) {
+      InetSocketAddress addr = DataNode.getStreamingAddr(conf);
       ss = (dnConf.socketWriteTimeout > 0) ? 
           ServerSocketChannel.open().socket() : new ServerSocket();
-          Server.bind(ss, streamingAddr, 0);
+          Server.bind(ss, addr, 0);
     } else {
       ss = secureResources.getStreamingSocket();
     }
     ss.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE); 
-    // adjust machine name with the actual port
-    int tmpPort = ss.getLocalPort();
-    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
-                                     tmpPort);
-    LOG.info("Opened streaming server at " + selfAddr);
+
+    streamingAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
+                                     ss.getLocalPort());
+
+    LOG.info("Opened streaming server at " + streamingAddr);
     this.threadGroup = new ThreadGroup("dataXceiverServer");
     this.dataXceiverServer = new Daemon(threadGroup, 
         new DataXceiverServer(ss, conf, this));
@@ -646,7 +656,7 @@ public class DataNode extends Configured
     this.blockPoolTokenSecretManager = new BlockPoolTokenSecretManager();
     initIpcServer(conf);
 
 
-    metrics = DataNodeMetrics.create(conf, getMachineName());
+    metrics = DataNodeMetrics.create(conf, getDisplayName());
 
 
     blockPoolManager = new BlockPoolManager(this);
     blockPoolManager.refreshNamenodes(conf);
@@ -657,14 +667,18 @@ public class DataNode extends Configured
    * @param nsInfo the namespace info from the first part of the NN handshake
    */
   DatanodeRegistration createBPRegistration(NamespaceInfo nsInfo) {
-    DatanodeRegistration bpRegistration = createUnknownBPRegistration();
-    String blockPoolId = nsInfo.getBlockPoolID();
-    
+    final String xferIp = streamingAddr.getAddress().getHostAddress();
+    DatanodeRegistration bpRegistration = new DatanodeRegistration(xferIp);
+    bpRegistration.setXferPort(getXferPort());
+    bpRegistration.setInfoPort(getInfoPort());
+    bpRegistration.setIpcPort(getIpcPort());
+    bpRegistration.setHostName(hostName);
     bpRegistration.setStorageID(getStorageId());
-    StorageInfo storageInfo = storage.getBPStorage(blockPoolId);
+
+    StorageInfo storageInfo = storage.getBPStorage(nsInfo.getBlockPoolID());
     if (storageInfo == null) {
       // it's null in the case of SimulatedDataSet
-      bpRegistration.storageInfo.layoutVersion = HdfsConstants.LAYOUT_VERSION;
+      bpRegistration.getStorageInfo().layoutVersion = HdfsConstants.LAYOUT_VERSION;
       bpRegistration.setStorageInfo(nsInfo);
     } else {
       bpRegistration.setStorageInfo(storageInfo);
@@ -679,17 +693,18 @@ public class DataNode extends Configured
    * Also updates the block pool's state in the secret manager.
    */
   synchronized void bpRegistrationSucceeded(DatanodeRegistration bpRegistration,
-      String blockPoolId)
-      throws IOException {
-    hostName = bpRegistration.getHost();
+      String blockPoolId) throws IOException {
+    // Set the ID if we haven't already
+    if (null == id) {
+      id = bpRegistration;
+    }
 
 
     if (storage.getStorageID().equals("")) {
-      // This is a fresh datanode -- take the storage ID provided by the
-      // NN and persist it.
+      // This is a fresh datanode, persist the NN-provided storage ID
       storage.setStorageID(bpRegistration.getStorageID());
       storage.writeAll();
       LOG.info("New storage id " + bpRegistration.getStorageID()
-          + " is assigned to data-node " + bpRegistration.getName());
+          + " is assigned to data-node " + bpRegistration);
     } else if(!storage.getStorageID().equals(bpRegistration.getStorageID())) {
       throw new IOException("Inconsistent storage IDs. Name-node returned "
           + bpRegistration.getStorageID() 
@@ -708,7 +723,7 @@ public class DataNode extends Configured
    */
   private void registerBlockPoolWithSecretManager(DatanodeRegistration bpRegistration,
       String blockPoolId) throws IOException {
-    ExportedBlockKeys keys = bpRegistration.exportedKeys;
+    ExportedBlockKeys keys = bpRegistration.getExportedKeys();
     isBlockTokenEnabled = keys.isBlockTokenEnabled();
     // TODO should we check that all federated nns are either enabled or
     // disabled?
@@ -728,8 +743,8 @@ public class DataNode extends Configured
     }
     
     
     blockPoolTokenSecretManager.setKeys(blockPoolId,
-        bpRegistration.exportedKeys);
-    bpRegistration.exportedKeys = ExportedBlockKeys.DUMMY_KEYS;
+        bpRegistration.getExportedKeys());
+    bpRegistration.setExportedKeys(ExportedBlockKeys.DUMMY_KEYS);
   }
 
 
   /**
@@ -783,18 +798,6 @@ public class DataNode extends Configured
     data.addBlockPool(nsInfo.getBlockPoolID(), conf);
   }
 
 
-  /**
-   * Create a DatanodeRegistration object with no valid StorageInfo.
-   * This is used when reporting an error during handshake - ie
-   * before we can load any specific block pool.
-   */
-  private DatanodeRegistration createUnknownBPRegistration() {
-    DatanodeRegistration reg = new DatanodeRegistration(getMachineName());
-    reg.setInfoPort(infoServer.getPort());
-    reg.setIpcPort(getIpcPort());
-    return reg;
-  }
-
   BPOfferService[] getAllBpOs() {
     return blockPoolManager.getAllNamenodeThreads();
   }
@@ -844,23 +847,37 @@ public class DataNode extends Configured
     MBeans.register("DataNode", "DataNodeInfo", this);
   }
   
   
-  int getPort() {
-    return selfAddr.getPort();
+  int getXferPort() {
+    return streamingAddr.getPort();
   }
   
   
   String getStorageId() {
     return storage.getStorageID();
   }
-  
-  /** 
-   * Get host:port with host set to Datanode host and port set to the
-   * port {@link DataXceiver} is serving.
-   * @return host:port string
+
+  /**
+   * @return name useful for logging
    */
-  public String getMachineName() {
-    return hostName + ":" + getPort();
+  public String getDisplayName() {
+    // NB: our DatanodeID may not be set yet
+    return hostName + ":" + getIpcPort();
   }
-  
+
+  /**
+   * NB: The datanode can perform data transfer on the streaming
+   * address however clients are given the IPC IP address for data
+   * transfer, and that may be be a different address.
+   * 
+   * @return socket address for data transfer
+   */
+  public InetSocketAddress getXferAddress() {
+    return streamingAddr;
+  }
+
+  /**
+   * @return the datanode's IPC port
+   */
+  @VisibleForTesting
   public int getIpcPort() {
     return ipcServer.getListenerAddress().getPort();
   }
@@ -880,25 +897,6 @@ public class DataNode extends Configured
     return bpos.bpRegistration;
   }
   
   
-  /**
-   * get BP registration by machine and port name (host:port)
-   * @param mName - the name that the NN used
-   * @return BP registration 
-   * @throws IOException 
-   */
-  DatanodeRegistration getDNRegistrationByMachineName(String mName) {
-    // TODO: all the BPs should have the same name as each other, they all come
-    // from getName() here! and the use cases only are in tests where they just
-    // call with getName(). So we could probably just make this method return
-    // the first BPOS's registration. See HDFS-2609.
-    BPOfferService [] bposArray = blockPoolManager.getAllNamenodeThreads();
-    for (BPOfferService bpos : bposArray) {
-      if(bpos.bpRegistration.getName().equals(mName))
-        return bpos.bpRegistration;
-    }
-    return null;
-  }
-  
   /**
    * Creates either NIO or regular depending on socketWriteTimeout.
    */
@@ -918,8 +916,8 @@ public class DataNode extends Configured
   public static InterDatanodeProtocol createInterDataNodeProtocolProxy(
       DatanodeID datanodeid, final Configuration conf, final int socketTimeout)
     throws IOException {
-    final InetSocketAddress addr = NetUtils.createSocketAddr(
-        datanodeid.getHost() + ":" + datanodeid.getIpcPort());
+    final InetSocketAddress addr =
+      NetUtils.createSocketAddr(datanodeid.getIpcAddr());
     if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
       InterDatanodeProtocol.LOG.debug("InterDatanodeProtocol addr=" + addr);
     }
@@ -936,10 +934,6 @@ public class DataNode extends Configured
       throw new IOException(ie.getMessage());
     }
   }
-  
-  public InetSocketAddress getSelfAddr() {
-    return selfAddr;
-  }
     
     
   DataNodeMetrics getMetrics() {
     return metrics;
@@ -947,7 +941,7 @@ public class DataNode extends Configured
   
   
   public static void setNewStorageID(DatanodeID dnId) {
     LOG.info("Datanode is " + dnId);
-    dnId.storageID = createNewStorageId(dnId.getPort());
+    dnId.setStorageID(createNewStorageId(dnId.getXferPort()));
   }
   
   
   static String createNewStorageId(int port) {
@@ -1223,7 +1217,7 @@ public class DataNode extends Configured
       if (LOG.isInfoEnabled()) {
         StringBuilder xfersBuilder = new StringBuilder();
         for (int i = 0; i < numTargets; i++) {
-          xfersBuilder.append(xferTargets[i].getName());
+          xfersBuilder.append(xferTargets[i]);
           xfersBuilder.append(" ");
         }
         LOG.info(bpReg + " Starting thread to transfer block " + 
@@ -1381,7 +1375,7 @@ public class DataNode extends Configured
       
       
       try {
         InetSocketAddress curTarget = 
-          NetUtils.createSocketAddr(targets[0].getName());
+          NetUtils.createSocketAddr(targets[0].getXferAddr());
         sock = newSocket();
         NetUtils.connect(sock, curTarget, dnConf.socketTimeout);
         sock.setSoTimeout(targets.length * dnConf.socketTimeout);
@@ -1434,9 +1428,8 @@ public class DataNode extends Configured
           }
         }
       } catch (IOException ie) {
-        LOG.warn(
-            bpReg + ":Failed to transfer " + b + " to " + targets[0].getName()
-                + " got ", ie);
+        LOG.warn(bpReg + ":Failed to transfer " + b + " to " +
+            targets[0] + " got ", ie);
         // check if there are any disk problem
         checkDiskError();
         
         
@@ -1632,7 +1625,7 @@ public class DataNode extends Configured
 
 
   @Override
   public String toString() {
-    return "DataNode{data=" + data + ", localName='" + getMachineName()
+    return "DataNode{data=" + data + ", localName='" + getDisplayName()
         + "', storageID='" + getStorageId() + "', xmitsInProgress="
         + xmitsInProgress.get() + "}";
   }
@@ -1990,15 +1983,14 @@ public class DataNode extends Configured
   
   
   private static void logRecoverBlock(String who,
       ExtendedBlock block, DatanodeID[] targets) {
-    StringBuilder msg = new StringBuilder(targets[0].getName());
+    StringBuilder msg = new StringBuilder(targets[0].toString());
     for (int i = 1; i < targets.length; i++) {
-      msg.append(", " + targets[i].getName());
+      msg.append(", " + targets[i]);
     }
     LOG.info(who + " calls recoverBlock(block=" + block
         + ", targets=[" + msg + "])");
   }
 
 
-  // ClientDataNodeProtocol implementation
   @Override // ClientDataNodeProtocol
   public long getReplicaVisibleLength(final ExtendedBlock block) throws IOException {
     checkWriteAccess(block);
@@ -2076,8 +2068,7 @@ public class DataNode extends Configured
     storage.finalizeUpgrade(blockPoolId);
   }
 
 
-  // Determine a Datanode's streaming address
-  public static InetSocketAddress getStreamingAddr(Configuration conf) {
+  static InetSocketAddress getStreamingAddr(Configuration conf) {
     return NetUtils.createSocketAddr(
         conf.get(DFS_DATANODE_ADDRESS_KEY, DFS_DATANODE_ADDRESS_DEFAULT));
   }
@@ -2099,8 +2090,11 @@ public class DataNode extends Configured
     return this.getConf().get("dfs.datanode.info.port");
   }
   
   
-  public int getInfoPort(){
-    return this.infoServer.getPort();
+  /**
+   * @return the datanode's http port
+   */
+  public int getInfoPort() {
+    return infoServer.getPort();
   }
 
 
   /**
@@ -2142,7 +2136,7 @@ public class DataNode extends Configured
     blockPoolManager.refreshNamenodes(conf);
   }
 
 
-  @Override //ClientDatanodeProtocol
+  @Override // ClientDatanodeProtocol
   public void refreshNamenodes() throws IOException {
     conf = new Configuration();
     refreshNamenodes(conf);
@@ -2204,10 +2198,9 @@ public class DataNode extends Configured
     return true;
   }
   
   
-  /** Methods used by fault injection tests */
+  @VisibleForTesting
   public DatanodeID getDatanodeId() {
-    return new DatanodeID(getMachineName(), getStorageId(),
-        infoServer.getPort(), getIpcPort());
+    return id;
   }
 
 
   /**

+ 23 - 67
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -73,9 +73,6 @@ public class DataStorage extends Storage {
   public final static String STORAGE_DIR_FINALIZED = "finalized";
   public final static String STORAGE_DIR_TMP = "tmp";
 
 
-  private static final Pattern PRE_GENSTAMP_META_FILE_PATTERN = 
-    Pattern.compile("(.*blk_[-]*\\d+)\\.meta$");
-  
   /** Access to this variable is guarded by "this" */
   private String storageID;
 
 
@@ -197,7 +194,7 @@ public class DataStorage extends Storage {
     }
     
     
     // make sure we have storage id set - if not - generate new one
-    createStorageID(datanode.getPort());
+    createStorageID(datanode.getXferPort());
     
     
     // 3. Update all storages. Some of them might have just been formatted.
     this.writeAll();
@@ -669,13 +666,6 @@ public class DataStorage extends Storage {
           in.close();
         }
       } else {
-        
-        //check if we are upgrading from pre-generation stamp version.
-        if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) {
-          // Link to the new file name.
-          to = new File(convertMetatadataFileName(to.getAbsolutePath()));
-        }
-        
         HardLink.createHardLink(from, to);
         hl.linkStats.countSingleLinks++;
       }
@@ -687,50 +677,32 @@ public class DataStorage extends Storage {
     if (!to.mkdirs())
       throw new IOException("Cannot create directory " + to);
     
     
-    //If upgrading from old stuff, need to munge the filenames.  That has to
-    //be done one file at a time, so hardlink them one at a time (slow).
-    if (oldLV >= PRE_GENERATIONSTAMP_LAYOUT_VERSION) {
-      String[] blockNames = from.list(new java.io.FilenameFilter() {
-          public boolean accept(File dir, String name) {
-            return name.startsWith(BLOCK_SUBDIR_PREFIX) 
-              || name.startsWith(BLOCK_FILE_PREFIX)
-              || name.startsWith(COPY_FILE_PREFIX);
-          }
-        });
-      if (blockNames.length == 0) {
-        hl.linkStats.countEmptyDirs++;
+    String[] blockNames = from.list(new java.io.FilenameFilter() {
+      public boolean accept(File dir, String name) {
+        return name.startsWith(BLOCK_FILE_PREFIX);
       }
-      else for(int i = 0; i < blockNames.length; i++)
-        linkBlocks(new File(from, blockNames[i]), 
-            new File(to, blockNames[i]), oldLV, hl);
-    } 
-    else {
-      //If upgrading from a relatively new version, we only need to create
-      //links with the same filename.  This can be done in bulk (much faster).
-      String[] blockNames = from.list(new java.io.FilenameFilter() {
+    });
+
+    // Block files just need hard links with the same file names
+    // but a different directory
+    if (blockNames.length > 0) {
+      HardLink.createHardLinkMult(from, blockNames, to);
+      hl.linkStats.countMultLinks++;
+      hl.linkStats.countFilesMultLinks += blockNames.length;
+    } else {
+      hl.linkStats.countEmptyDirs++;
+    }
+    
+    // Now take care of the rest of the files and subdirectories
+    String[] otherNames = from.list(new java.io.FilenameFilter() {
         public boolean accept(File dir, String name) {
-          return name.startsWith(BLOCK_FILE_PREFIX);
+          return name.startsWith(BLOCK_SUBDIR_PREFIX) 
+            || name.startsWith(COPY_FILE_PREFIX);
         }
       });
-      if (blockNames.length > 0) {
-        HardLink.createHardLinkMult(from, blockNames, to);
-        hl.linkStats.countMultLinks++;
-        hl.linkStats.countFilesMultLinks += blockNames.length;
-      } else {
-        hl.linkStats.countEmptyDirs++;
-      }
-      
-      //now take care of the rest of the files and subdirectories
-      String[] otherNames = from.list(new java.io.FilenameFilter() {
-          public boolean accept(File dir, String name) {
-            return name.startsWith(BLOCK_SUBDIR_PREFIX) 
-              || name.startsWith(COPY_FILE_PREFIX);
-          }
-        });
-      for(int i = 0; i < otherNames.length; i++)
-        linkBlocks(new File(from, otherNames[i]), 
-            new File(to, otherNames[i]), oldLV, hl);
-    }
+    for(int i = 0; i < otherNames.length; i++)
+      linkBlocks(new File(from, otherNames[i]), 
+          new File(to, otherNames[i]), oldLV, hl);
   }
 
 
   private void verifyDistributedUpgradeProgress(UpgradeManagerDatanode um,
@@ -741,22 +713,6 @@ public class DataStorage extends Storage {
     um.initializeUpgrade(nsInfo);
   }
   
   
-  /**
-   * This is invoked on target file names when upgrading from pre generation 
-   * stamp version (version -13) to correct the metatadata file name.
-   * @param oldFileName
-   * @return the new metadata file name with the default generation stamp.
-   */
-  private static String convertMetatadataFileName(String oldFileName) {
-    Matcher matcher = PRE_GENSTAMP_META_FILE_PATTERN.matcher(oldFileName); 
-    if (matcher.matches()) {
-      //return the current metadata file name
-      return DatanodeUtil.getMetaFileName(matcher.group(1),
-          GenerationStamp.GRANDFATHER_GENERATION_STAMP); 
-    }
-    return oldFileName;
-  }
-
   /**
    * Add bpStorage into bpStorageMap
    */

+ 6 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -168,13 +168,13 @@ class DataXceiver extends Receiver implements Runnable {
         ++opsProcessed;
       } while (!s.isClosed() && dnConf.socketKeepaliveTimeout > 0);
     } catch (Throwable t) {
-      LOG.error(datanode.getMachineName() + ":DataXceiver error processing " +
+      LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " +
                 ((op == null) ? "unknown" : op.name()) + " operation " +
                 " src: " + remoteAddress +
                 " src: " + remoteAddress +
                 " dest: " + localAddress, t);
                 " dest: " + localAddress, t);
     } finally {
       if (LOG.isDebugEnabled()) {
-        LOG.debug(datanode.getMachineName() + ":Number of active connections is: "
+        LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "
             + datanode.getXceiverCount());
       }
       updateCurrentThreadName("Cleaning up");
@@ -352,7 +352,7 @@ class DataXceiver extends Receiver implements Runnable {
       if (targets.length > 0) {
         InetSocketAddress mirrorTarget = null;
         // Connect to backup machine
-        mirrorNode = targets[0].getName();
+        mirrorNode = targets[0].getXferAddr();
         mirrorTarget = NetUtils.createSocketAddr(mirrorNode);
         mirrorSock = datanode.newSocket();
         try {
@@ -667,8 +667,8 @@ class DataXceiver extends Receiver implements Runnable {
     
     
     try {
       // get the output stream to the proxy
-      InetSocketAddress proxyAddr = NetUtils.createSocketAddr(
-          proxySource.getName());
+      InetSocketAddress proxyAddr =
+        NetUtils.createSocketAddr(proxySource.getXferAddr());
       proxySock = datanode.newSocket();
       NetUtils.connect(proxySock, proxyAddr, dnConf.socketTimeout);
       proxySock.setSoTimeout(dnConf.socketTimeout);
@@ -820,7 +820,7 @@ class DataXceiver extends Receiver implements Runnable {
             if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
               DatanodeRegistration dnR = 
                 datanode.getDNRegistrationForBP(blk.getBlockPoolId());
-              resp.setFirstBadLink(dnR.getName());
+              resp.setFirstBadLink(dnR.getXferAddr());
             }
             resp.build().writeDelimitedTo(out);
             out.flush();

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java

@@ -152,11 +152,11 @@ class DataXceiverServer implements Runnable {
         // another thread closed our listener socket - that's expected during shutdown,
         // but not in other circumstances
         if (datanode.shouldRun) {
-          LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ace);
+          LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ace);
         }
       } catch (IOException ie) {
         IOUtils.closeSocket(s);
-        LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ie);
+        LOG.warn(datanode.getDisplayName() + ":DataXceiverServer: ", ie);
       } catch (OutOfMemoryError ie) {
         IOUtils.closeSocket(s);
         // DataNode can run out of memory if there is too many transfers.
@@ -169,7 +169,7 @@ class DataXceiverServer implements Runnable {
           // ignore
         }
       } catch (Throwable te) {
-        LOG.error(datanode.getMachineName()
+        LOG.error(datanode.getDisplayName()
             + ":DataXceiverServer: Exiting due to: ", te);
         datanode.shouldRun = false;
       }
@@ -177,7 +177,7 @@ class DataXceiverServer implements Runnable {
     try {
       ss.close();
     } catch (IOException ie) {
-      LOG.warn(datanode.getMachineName()
+      LOG.warn(datanode.getDisplayName()
           + " :DataXceiverServer: close exception", ie);
     }
   }
@@ -188,7 +188,7 @@ class DataXceiverServer implements Runnable {
     try {
       this.ss.close();
     } catch (IOException ie) {
-      LOG.warn(datanode.getMachineName() + ":DataXceiverServer.kill(): ", ie);
+      LOG.warn(datanode.getDisplayName() + ":DataXceiverServer.kill(): ", ie);
     }
 
 
     // close all the sockets that were accepted earlier

+ 12 - 18
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java

@@ -136,10 +136,8 @@ public class DatanodeJspHelper {
           out.print("Empty file");
         } else {
           DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock, conf);
-          String fqdn = canonicalize(chosenNode.getHost());
-          String datanodeAddr = chosenNode.getName();
-          int datanodePort = Integer.parseInt(datanodeAddr.substring(
-              datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
+          String fqdn = canonicalize(chosenNode.getIpAddr());
+          int datanodePort = chosenNode.getXferPort();
           String redirectLocation = "http://" + fqdn + ":"
               + chosenNode.getInfoPort() + "/browseBlock.jsp?blockId="
               + firstBlock.getBlock().getBlockId() + "&blockSize="
@@ -313,7 +311,7 @@ public class DatanodeJspHelper {
       dfs.close();
       return;
     }
-    String fqdn = canonicalize(chosenNode.getHost());
+    String fqdn = canonicalize(chosenNode.getIpAddr());
     String tailUrl = "http://" + fqdn + ":" + chosenNode.getInfoPort()
         + "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8")
         + "&namenodeInfoPort=" + namenodeInfoPort
@@ -360,10 +358,9 @@ public class DatanodeJspHelper {
       out.print("<td>" + blockidstring + ":</td>");
       DatanodeInfo[] locs = cur.getLocations();
       for (int j = 0; j < locs.length; j++) {
-        String datanodeAddr = locs[j].getName();
-        datanodePort = Integer.parseInt(datanodeAddr.substring(datanodeAddr
-            .indexOf(':') + 1, datanodeAddr.length()));
-        fqdn = canonicalize(locs[j].getHost());
+        String datanodeAddr = locs[j].getXferAddr();
+        datanodePort = locs[j].getXferPort();
+        fqdn = canonicalize(locs[j].getIpAddr());
         String blockUrl = "http://" + fqdn + ":" + locs[j].getInfoPort()
             + "/browseBlock.jsp?blockId=" + blockidstring
             + "&blockSize=" + blockSize
@@ -519,10 +516,8 @@ public class DatanodeJspHelper {
             nextStartOffset = 0;
             nextBlockSize = nextBlock.getBlock().getNumBytes();
             DatanodeInfo d = JspHelper.bestNode(nextBlock, conf);
-            String datanodeAddr = d.getName();
-            nextDatanodePort = Integer.parseInt(datanodeAddr.substring(
-                datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
-            nextHost = d.getHost();
+            nextDatanodePort = d.getXferPort();
+            nextHost = d.getIpAddr();
             nextPort = d.getInfoPort();
           }
         }
@@ -573,10 +568,8 @@ public class DatanodeJspHelper {
               prevStartOffset = 0;
             prevBlockSize = prevBlock.getBlock().getNumBytes();
             DatanodeInfo d = JspHelper.bestNode(prevBlock, conf);
-            String datanodeAddr = d.getName();
-            prevDatanodePort = Integer.parseInt(datanodeAddr.substring(
-                datanodeAddr.indexOf(':') + 1, datanodeAddr.length()));
-            prevHost = d.getHost();
+            prevDatanodePort = d.getXferPort();
+            prevHost = d.getIpAddr();
             prevPort = d.getInfoPort();
           }
         }
@@ -693,7 +686,8 @@ public class DatanodeJspHelper {
       dfs.close();
       return;
     }
-    InetSocketAddress addr = NetUtils.createSocketAddr(chosenNode.getName());
+    InetSocketAddress addr = 
+      NetUtils.createSocketAddr(chosenNode.getXferAddr());
     // view the last chunkSizeToView bytes while Tailing
     final long startOffset = blockSize >= chunkSizeToView ? blockSize
         - chunkSizeToView : 0;

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java

@@ -55,7 +55,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
     if( ! super.initializeUpgrade())
       return; // distr upgrade is not needed
     DataNode.LOG.info("\n   Distributed upgrade for DataNode " 
-        + dataNode.getMachineName() 
+        + dataNode.getDisplayName() 
         + " version " + getUpgradeVersion() + " to current LV " 
         + HdfsConstants.LAYOUT_VERSION + " is initialized.");
     UpgradeObjectDatanode curUO = (UpgradeObjectDatanode)currentUpgrades.first();
@@ -113,7 +113,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
     upgradeDaemon = new Daemon(curUO);
     upgradeDaemon.start();
     DataNode.LOG.info("\n   Distributed upgrade for DataNode " 
-        + dataNode.getMachineName() 
+        + dataNode.getDisplayName() 
         + " version " + getUpgradeVersion() + " to current LV " 
         + HdfsConstants.LAYOUT_VERSION + " is started.");
     return true;
@@ -128,7 +128,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
     if(startUpgrade()) // upgrade started
       return;
     throw new IOException(
-        "Distributed upgrade for DataNode " + dataNode.getMachineName() 
+        "Distributed upgrade for DataNode " + dataNode.getDisplayName() 
         + " version " + getUpgradeVersion() + " to current LV " 
         + HdfsConstants.LAYOUT_VERSION + " cannot be started. "
         + "The upgrade object is not defined.");
@@ -143,7 +143,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
     currentUpgrades = null;
     upgradeDaemon = null;
     DataNode.LOG.info("\n   Distributed upgrade for DataNode " 
-        + dataNode.getMachineName()
+        + dataNode.getDisplayName()
         + " version " + getUpgradeVersion() + " to current LV " 
         + HdfsConstants.LAYOUT_VERSION + " is complete.");
   }

+ 4 - 37
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java

@@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateBlocksOp;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
 import org.apache.hadoop.hdfs.util.Holder;
-import org.apache.hadoop.io.IOUtils;
 
 
 import com.google.common.base.Joiner;
 
 
@@ -231,37 +230,13 @@ public class FSEditLogLoader {
         // get name and replication
         final short replication  = fsNamesys.getBlockManager(
             ).adjustReplication(addCloseOp.replication);
-        PermissionStatus permissions = fsNamesys.getUpgradePermission();
-        if (addCloseOp.permissions != null) {
-          permissions = addCloseOp.permissions;
-        }
-        long blockSize = addCloseOp.blockSize;
-
-        // Versions of HDFS prior to 0.17 may log an OP_ADD transaction
-        // which includes blocks in it. When we update the minimum
-        // upgrade version to something more recent than 0.17, we can
-        // simplify this code by asserting that OP_ADD transactions
-        // don't have any blocks.
-        
-        // Older versions of HDFS does not store the block size in inode.
-        // If the file has more than one block, use the size of the
-        // first block as the blocksize. Otherwise use the default
-        // block size.
-        if (-8 <= logVersion && blockSize == 0) {
-          if (addCloseOp.blocks.length > 1) {
-            blockSize = addCloseOp.blocks[0].getNumBytes();
-          } else {
-            long first = ((addCloseOp.blocks.length == 1)?
-                addCloseOp.blocks[0].getNumBytes(): 0);
-            blockSize = Math.max(fsNamesys.getDefaultBlockSize(), first);
-          }
-        }
+        assert addCloseOp.blocks.length == 0;
 
         // add to the file tree
         newFile = (INodeFile)fsDir.unprotectedAddFile(
-            addCloseOp.path, permissions,
+            addCloseOp.path, addCloseOp.permissions,
             replication, addCloseOp.mtime,
-            addCloseOp.atime, blockSize,
+            addCloseOp.atime, addCloseOp.blockSize,
             true, addCloseOp.clientName, addCloseOp.clientMachine);
         fsNamesys.leaseManager.addLease(addCloseOp.clientName, addCloseOp.path);
 
@@ -373,12 +348,7 @@ public class FSEditLogLoader {
     }
     case OP_MKDIR: {
       MkdirOp mkdirOp = (MkdirOp)op;
-      PermissionStatus permissions = fsNamesys.getUpgradePermission();
-      if (mkdirOp.permissions != null) {
-        permissions = mkdirOp.permissions;
-      }
-
-      fsDir.unprotectedMkdir(mkdirOp.path, permissions,
+      fsDir.unprotectedMkdir(mkdirOp.path, mkdirOp.permissions,
                              mkdirOp.timestamp);
       break;
     }
@@ -493,9 +463,6 @@ public class FSEditLogLoader {
       // no data in here currently.
       break;
     }
-    case OP_DATANODE_ADD:
-    case OP_DATANODE_REMOVE:
-      break;
     default:
       throw new IOException("Invalid operation read " + op.opCode);
     }

+ 11 - 129
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -30,11 +30,8 @@ import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.util.PureJavaCrc32;
 
 import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*;
@@ -81,8 +78,6 @@ public abstract class FSEditLogOp {
         instances.put(OP_DELETE, new DeleteOp());
         instances.put(OP_MKDIR, new MkdirOp());
         instances.put(OP_SET_GENSTAMP, new SetGenstampOp());
-        instances.put(OP_DATANODE_ADD, new DatanodeAddOp());
-        instances.put(OP_DATANODE_REMOVE, new DatanodeRemoveOp());
         instances.put(OP_SET_PERMISSIONS, new SetPermissionsOp());
         instances.put(OP_SET_OWNER, new SetOwnerOp());
         instances.put(OP_SET_NS_QUOTA, new SetNSQuotaOp());
@@ -147,7 +142,6 @@ public abstract class FSEditLogOp {
     PermissionStatus permissions;
     String clientName;
     String clientMachine;
-    //final DatanodeDescriptor[] dataNodeDescriptors; UNUSED
 
     private AddCloseOp(FSEditLogOpCodes opCode) {
       super(opCode);
@@ -226,13 +220,10 @@ public abstract class FSEditLogOp {
     @Override
     void readFields(DataInputStream in, int logVersion)
         throws IOException {
-      // versions > 0 support per file replication
-      // get name and replication
       if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
         this.length = in.readInt();
       }
-      if (-7 == logVersion && length != 3||
-          -17 < logVersion && logVersion < -7 && length != 4 ||
+      if ((-17 < logVersion && length != 4) ||
           (logVersion <= -17 && length != 5 && !LayoutVersion.supports(
               Feature.EDITLOG_OP_OPTIMIZATION, logVersion))) {
         throw new IOException("Incorrect data format."  +
@@ -259,49 +250,26 @@ public abstract class FSEditLogOp {
       } else {
         this.atime = 0;
       }
-      if (logVersion < -7) {
-        if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
-          this.blockSize = FSImageSerialization.readLong(in);
-        } else {
-          this.blockSize = readLong(in);
-        }
+
+      if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
+        this.blockSize = FSImageSerialization.readLong(in);
       } else {
-        this.blockSize = 0;
+        this.blockSize = readLong(in);
       }
 
-      // get blocks
       this.blocks = readBlocks(in, logVersion);
-
-      if (logVersion <= -11) {
-        this.permissions = PermissionStatus.read(in);
-      } else {
-        this.permissions = null;
-      }
+      this.permissions = PermissionStatus.read(in);
 
       // clientname, clientMachine and block locations of last block.
-      if (this.opCode == OP_ADD && logVersion <= -12) {
+      if (this.opCode == OP_ADD) {
         this.clientName = FSImageSerialization.readString(in);
         this.clientMachine = FSImageSerialization.readString(in);
-        if (-13 <= logVersion) {
-          readDatanodeDescriptorArray(in);
-        }
       } else {
         this.clientName = "";
         this.clientMachine = "";
       }
     }
 
-    /** This method is defined for compatibility reason. */
-    private static DatanodeDescriptor[] readDatanodeDescriptorArray(DataInput in)
-        throws IOException {
-      DatanodeDescriptor[] locations = new DatanodeDescriptor[in.readInt()];
-        for (int i = 0; i < locations.length; i++) {
-          locations[i] = new DatanodeDescriptor();
-          locations[i].readFieldsFromFSEditLog(in);
-        }
-        return locations;
-    }
-
     private static Block[] readBlocks(
         DataInputStream in,
         int logVersion) throws IOException {
@@ -309,14 +277,7 @@ public abstract class FSEditLogOp {
       Block[] blocks = new Block[numBlocks];
       for (int i = 0; i < numBlocks; i++) {
         Block blk = new Block();
-        if (logVersion <= -14) {
-          blk.readFields(in);
-        } else {
-          BlockTwo oldblk = new BlockTwo();
-          oldblk.readFields(in);
-          blk.set(oldblk.blkid, oldblk.len,
-                  GenerationStamp.GRANDFATHER_GENERATION_STAMP);
-        }
+        blk.readFields(in);
         blocks[i] = blk;
       }
       return blocks;
@@ -788,17 +749,14 @@ public abstract class FSEditLogOp {
     }
     
     @Override
-    void readFields(DataInputStream in, int logVersion)
-        throws IOException {
-
+    void readFields(DataInputStream in, int logVersion) throws IOException {
       if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
         this.length = in.readInt();
       }
       if (-17 < logVersion && length != 2 ||
           logVersion <= -17 && length != 3
           && !LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
-        throw new IOException("Incorrect data format. "
-                              + "Mkdir operation.");
+        throw new IOException("Incorrect data format. Mkdir operation.");
       }
       this.path = FSImageSerialization.readString(in);
       if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
@@ -811,7 +769,6 @@ public abstract class FSEditLogOp {
       // However, currently this is not being updated/used because of
       // performance reasons.
       if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, logVersion)) {
-        /* unused this.atime = */
         if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
           FSImageSerialization.readLong(in);
         } else {
@@ -819,11 +776,7 @@ public abstract class FSEditLogOp {
         }
       }
 
-      if (logVersion <= -11) {
-        this.permissions = PermissionStatus.read(in);
-      } else {
-        this.permissions = null;
-      }
+      this.permissions = PermissionStatus.read(in);
     }
 
     @Override
@@ -888,77 +841,6 @@ public abstract class FSEditLogOp {
     }
   }
 
-  @SuppressWarnings("deprecation")
-  static class DatanodeAddOp extends FSEditLogOp {
-    private DatanodeAddOp() {
-      super(OP_DATANODE_ADD);
-    }
-
-    static DatanodeAddOp getInstance() {
-      return (DatanodeAddOp)opInstances.get()
-        .get(OP_DATANODE_ADD);
-    }
-
-    @Override 
-    void writeFields(DataOutputStream out) throws IOException {
-      throw new IOException("Deprecated, should not write");
-    }
-
-    @Override
-    void readFields(DataInputStream in, int logVersion)
-        throws IOException {
-      //Datanodes are not persistent any more.
-      FSImageSerialization.DatanodeImage.skipOne(in);
-    }
-
-    @Override
-    public String toString() {
-      StringBuilder builder = new StringBuilder();
-      builder.append("DatanodeAddOp [opCode=");
-      builder.append(opCode);
-      builder.append(", txid=");
-      builder.append(txid);
-      builder.append("]");
-      return builder.toString();
-    }
-  }
-
-  @SuppressWarnings("deprecation")
-  static class DatanodeRemoveOp extends FSEditLogOp {
-    private DatanodeRemoveOp() {
-      super(OP_DATANODE_REMOVE);
-    }
-
-    static DatanodeRemoveOp getInstance() {
-      return (DatanodeRemoveOp)opInstances.get()
-        .get(OP_DATANODE_REMOVE);
-    }
-
-    @Override 
-    void writeFields(DataOutputStream out) throws IOException {
-      throw new IOException("Deprecated, should not write");
-    }
-
-    @Override
-    void readFields(DataInputStream in, int logVersion)
-        throws IOException {
-      DatanodeID nodeID = new DatanodeID();
-      nodeID.readFields(in);
-      //Datanodes are not persistent any more.
-    }
-
-    @Override
-    public String toString() {
-      StringBuilder builder = new StringBuilder();
-      builder.append("DatanodeRemoveOp [opCode=");
-      builder.append(opCode);
-      builder.append(", txid=");
-      builder.append(txid);
-      builder.append("]");
-      return builder.toString();
-    }
-  }
-
   static class SetPermissionsOp extends FSEditLogOp {
     String src;
     FsPermission permissions;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOpCodes.java

@@ -36,8 +36,8 @@ public enum FSEditLogOpCodes {
   OP_DELETE                     ((byte)  2),
   OP_MKDIR                      ((byte)  3),
   OP_SET_REPLICATION            ((byte)  4),
-  @Deprecated OP_DATANODE_ADD   ((byte)  5),
-  @Deprecated OP_DATANODE_REMOVE((byte)  6),
+  @Deprecated OP_DATANODE_ADD   ((byte)  5), // obsolete
+  @Deprecated OP_DATANODE_REMOVE((byte)  6), // obsolete
   OP_SET_PERMISSIONS            ((byte)  7),
   OP_SET_OWNER                  ((byte)  8),
   OP_CLOSE                      ((byte)  9),

+ 27 - 100
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java

@@ -131,34 +131,22 @@ class FSImageFormat {
 
       DataInputStream in = new DataInputStream(fin);
       try {
-        /*
-         * Note: Remove any checks for version earlier than 
-         * Storage.LAST_UPGRADABLE_LAYOUT_VERSION since we should never get 
-         * to here with older images.
-         */
-
-        /*
-         * TODO we need to change format of the image file
-         * it should not contain version and namespace fields
-         */
         // read image version: first appeared in version -1
         int imgVersion = in.readInt();
-        if(getLayoutVersion() != imgVersion)
+        if (getLayoutVersion() != imgVersion) {
           throw new InconsistentFSStateException(curFile, 
               "imgVersion " + imgVersion +
               " expected to be " + getLayoutVersion());
+        }
 
         // read namespaceID: first appeared in version -2
         in.readInt();
 
-        // read number of files
-        long numFiles = readNumFiles(in);
+        long numFiles = in.readLong();
 
         // read in the last generation stamp.
-        if (imgVersion <= -12) {
-          long genstamp = in.readLong();
-          namesystem.setGenerationStamp(genstamp); 
-        }
+        long genstamp = in.readLong();
+        namesystem.setGenerationStamp(genstamp); 
         
         // read the transaction ID of the last edit represented by
         // this image
@@ -167,7 +155,6 @@ class FSImageFormat {
         } else {
           imgTxId = 0;
         }
-        
 
         // read compression related info
         FSImageCompression compression;
@@ -189,13 +176,9 @@ class FSImageFormat {
           loadFullNameINodes(numFiles, in);
         }
 
-        // load datanode info
-        this.loadDatanodes(in);
+        loadFilesUnderConstruction(in);
 
-        // load Files Under Construction
-        this.loadFilesUnderConstruction(in);
-
-        this.loadSecretManagerState(in);
+        loadSecretManagerState(in);
 
         // make sure to read to the end of file
         int eof = in.read();
@@ -335,89 +318,44 @@ class FSImageFormat {
     if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imgVersion)) {
       atime = in.readLong();
     }
-    if (imgVersion <= -8) {
-      blockSize = in.readLong();
-    }
+    blockSize = in.readLong();
     int numBlocks = in.readInt();
     BlockInfo blocks[] = null;
 
-    // for older versions, a blocklist of size 0
-    // indicates a directory.
-    if ((-9 <= imgVersion && numBlocks > 0) ||
-        (imgVersion < -9 && numBlocks >= 0)) {
+    if (numBlocks >= 0) {
       blocks = new BlockInfo[numBlocks];
       for (int j = 0; j < numBlocks; j++) {
         blocks[j] = new BlockInfo(replication);
-        if (-14 < imgVersion) {
-          blocks[j].set(in.readLong(), in.readLong(), 
-                        GenerationStamp.GRANDFATHER_GENERATION_STAMP);
-        } else {
-          blocks[j].readFields(in);
-        }
-      }
-    }
-    // Older versions of HDFS does not store the block size in inode.
-    // If the file has more than one block, use the size of the 
-    // first block as the blocksize. Otherwise use the default block size.
-    //
-    if (-8 <= imgVersion && blockSize == 0) {
-      if (numBlocks > 1) {
-        blockSize = blocks[0].getNumBytes();
-      } else {
-        long first = ((numBlocks == 1) ? blocks[0].getNumBytes(): 0);
-        blockSize = Math.max(namesystem.getDefaultBlockSize(), first);
+        blocks[j].readFields(in);
       }
     }
     
     // get quota only when the node is a directory
     long nsQuota = -1L;
-      if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)
-          && blocks == null && numBlocks == -1) {
-        nsQuota = in.readLong();
-      }
-      long dsQuota = -1L;
-      if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)
-          && blocks == null && numBlocks == -1) {
-        dsQuota = in.readLong();
-      }
-  
-      // Read the symlink only when the node is a symlink
-      String symlink = "";
-      if (numBlocks == -2) {
-        symlink = Text.readString(in);
-      }
-      
-      PermissionStatus permissions = namesystem.getUpgradePermission();
-      if (imgVersion <= -11) {
-        permissions = PermissionStatus.read(in);
-      }
-  
-      return INode.newINode(permissions, blocks, symlink, replication,
-          modificationTime, atime, nsQuota, dsQuota, blockSize);
+    if (blocks == null && numBlocks == -1) {
+      nsQuota = in.readLong();
+    }
+    long dsQuota = -1L;
+    if (LayoutVersion.supports(Feature.DISKSPACE_QUOTA, imgVersion)
+        && blocks == null && numBlocks == -1) {
+      dsQuota = in.readLong();
     }
 
-    private void loadDatanodes(DataInputStream in)
-        throws IOException {
-      int imgVersion = getLayoutVersion();
-
-      if (imgVersion > -3) // pre datanode image version
-        return;
-      if (imgVersion <= -12) {
-        return; // new versions do not store the datanodes any more.
-      }
-      int size = in.readInt();
-      for(int i = 0; i < size; i++) {
-        // We don't need to add these descriptors any more.
-        FSImageSerialization.DatanodeImage.skipOne(in);
-      }
+    // Read the symlink only when the node is a symlink
+    String symlink = "";
+    if (numBlocks == -2) {
+      symlink = Text.readString(in);
     }
+    
+    PermissionStatus permissions = PermissionStatus.read(in);
+
+    return INode.newINode(permissions, blocks, symlink, replication,
+        modificationTime, atime, nsQuota, dsQuota, blockSize);
+  }
 
     private void loadFilesUnderConstruction(DataInputStream in)
     throws IOException {
       FSDirectory fsDir = namesystem.dir;
-      int imgVersion = getLayoutVersion();
-      if (imgVersion > -13) // pre lease image version
-        return;
       int size = in.readInt();
 
       LOG.info("Number of files under construction = " + size);
@@ -457,17 +395,6 @@ class FSImageFormat {
       return namesystem.getFSImage().getStorage().getLayoutVersion();
     }
 
-    private long readNumFiles(DataInputStream in)
-        throws IOException {
-      int imgVersion = getLayoutVersion();
-
-      if (LayoutVersion.supports(Feature.NAMESPACE_QUOTA, imgVersion)) {
-        return in.readLong();
-      } else {
-        return in.readInt();
-      }
-    }
-
     private boolean isRoot(byte[][] path) {
       return path.length == 1 &&
         path[0] == null;    

+ 3 - 59
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java

@@ -17,9 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.DataInput;
 import java.io.DataInputStream;
-import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 
@@ -31,7 +29,6 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
@@ -39,7 +36,6 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.ShortWritable;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
 
 /**
@@ -107,13 +103,10 @@ public class FSImageSerialization {
     String clientName = readString(in);
     String clientMachine = readString(in);
 
-    // These locations are not used at all
+    // We previously stored locations for the last block, now we
+    // just record that there are none
     int numLocs = in.readInt();
-    DatanodeDescriptor[] locations = new DatanodeDescriptor[numLocs];
-    for (i = 0; i < numLocs; i++) {
-      locations[i] = new DatanodeDescriptor();
-      locations[i].readFields(in);
-    }
+    assert numLocs == 0 : "Unexpected block locations";
 
     return new INodeFileUnderConstruction(name, 
                                           blockReplication, 
@@ -320,53 +313,4 @@ public class FSImageSerialization {
     }
     return ret;
   }
-
-  /**
-   * DatanodeImage is used to store persistent information
-   * about datanodes into the fsImage.
-   */
-  static class DatanodeImage implements Writable {
-    DatanodeDescriptor node = new DatanodeDescriptor();
-
-    static void skipOne(DataInput in) throws IOException {
-      DatanodeImage nodeImage = new DatanodeImage();
-      nodeImage.readFields(in);
-    }
-
-    /////////////////////////////////////////////////
-    // Writable
-    /////////////////////////////////////////////////
-    /**
-     * Public method that serializes the information about a
-     * Datanode to be stored in the fsImage.
-     */
-    public void write(DataOutput out) throws IOException {
-      new DatanodeID(node).write(out);
-      out.writeLong(node.getCapacity());
-      out.writeLong(node.getRemaining());
-      out.writeLong(node.getLastUpdate());
-      out.writeInt(node.getXceiverCount());
-    }
-
-    /**
-     * Public method that reads a serialized Datanode
-     * from the fsImage.
-     */
-    public void readFields(DataInput in) throws IOException {
-      DatanodeID id = new DatanodeID();
-      id.readFields(in);
-      long capacity = in.readLong();
-      long remaining = in.readLong();
-      long lastUpdate = in.readLong();
-      int xceiverCount = in.readInt();
-
-      // update the DatanodeDescriptor with the data we read in
-      node.updateRegInfo(id);
-      node.setStorageID(id.getStorageID());
-      node.setCapacity(capacity);
-      node.setRemaining(remaining);
-      node.setLastUpdate(lastUpdate);
-      node.setXceiverCount(xceiverCount);
-    }
-  }
 }

+ 2 - 17
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -52,8 +52,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHO
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERSIST_BLOCKS_KEY;
@@ -118,7 +116,6 @@ import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.ServiceFailedException;
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -267,7 +264,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   private boolean persistBlocks;
   private UserGroupInformation fsOwner;
   private String supergroup;
-  private PermissionStatus defaultPermission;
   private boolean standbyShouldCheckpoint;
   
   // Scan interval is not configurable.
@@ -846,11 +842,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           "must not be specified if HA is not enabled.");
           "must not be specified if HA is not enabled.");
     }
     }
 
 
-    short filePermission = (short)conf.getInt(DFS_NAMENODE_UPGRADE_PERMISSION_KEY,
-                                              DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT);
-    this.defaultPermission = PermissionStatus.createImmutable(
-        fsOwner.getShortUserName(), supergroup, new FsPermission(filePermission));
-    
     this.serverDefaults = new FsServerDefaults(
         conf.getLongBytes(DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE_DEFAULT),
         conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY, DFS_BYTES_PER_CHECKSUM_DEFAULT),
@@ -878,14 +869,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
           DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_DEFAULT);
   }
 
-  /**
-   * Return the default path permission when upgrading from releases with no
-   * permissions (<=0.15) to releases with permissions (>=0.16)
-   */
-  protected PermissionStatus getUpgradePermission() {
-    return defaultPermission;
-  }
-  
   NamespaceInfo getNamespaceInfo() {
     readLock();
     try {
@@ -5072,6 +5055,8 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       innerinfo.put("lastContact", getLastContact(node));
       innerinfo.put("lastContact", getLastContact(node));
       innerinfo.put("usedSpace", getDfsUsed(node));
       innerinfo.put("usedSpace", getDfsUsed(node));
       innerinfo.put("adminState", node.getAdminState().toString());
       innerinfo.put("adminState", node.getAdminState().toString());
+      innerinfo.put("nonDfsUsedSpace", node.getNonDfsUsed());
+      innerinfo.put("capacity", node.getCapacity());
       info.put(node.getHostName(), innerinfo);
       info.put(node.getHostName(), innerinfo);
     }
     }
     return JSON.toString(info);
     return JSON.toString(info);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java

@@ -59,7 +59,7 @@ public class FileChecksumServlets {
         HttpServletRequest request, NameNode nn) 
         throws IOException {
       final String hostname = host instanceof DatanodeInfo 
-          ? ((DatanodeInfo)host).getHostName() : host.getHost();
+          ? ((DatanodeInfo)host).getHostName() : host.getIpAddr();
       final String scheme = request.getScheme();
       final int port = "https".equals(scheme)
           ? (Integer)getServletContext().getAttribute("datanode.https.port")

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java

@@ -59,7 +59,7 @@ public class FileDataServlet extends DfsServlet {
     if (host instanceof DatanodeInfo) {
       hostname = ((DatanodeInfo)host).getHostName();
     } else {
-      hostname = host.getHost();
+      hostname = host.getIpAddr();
     }
     final int port = "https".equals(scheme)
       ? (Integer)getServletContext().getAttribute("datanode.https.port")

+ 7 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -854,7 +854,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     BlockListAsLongs blist = new BlockListAsLongs(reports[0].getBlocks());
     if(stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
-           + "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks()
+           + "from " + nodeReg + " " + blist.getNumberOfBlocks()
            + " blocks");
     }
 
@@ -870,7 +870,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     verifyRequest(nodeReg);
     if(stateChangeLog.isDebugEnabled()) {
       stateChangeLog.debug("*BLOCK* NameNode.blockReceivedAndDeleted: "
-          +"from "+nodeReg.getName()+" "+receivedAndDeletedBlocks.length
+          +"from "+nodeReg+" "+receivedAndDeletedBlocks.length
           +" blocks.");
     }
     namesystem.getBlockManager().processIncrementalBlockReport(
@@ -880,7 +880,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
   @Override // DatanodeProtocol
   public void errorReport(DatanodeRegistration nodeReg,
                           int errorCode, String msg) throws IOException { 
-    String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName());
+    String dnName = 
+       (nodeReg == null) ? "Unknown DataNode" : nodeReg.toString();
 
     if (errorCode == DatanodeProtocol.NOTIFY) {
       LOG.info("Error report from " + dnName + ": " + msg);
@@ -909,13 +910,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
   }
 
   /** 
-   * Verify request.
+   * Verifies the given registration.
    * 
-   * Verifies correctness of the datanode version, registration ID, and 
-   * if the datanode does not need to be shutdown.
-   * 
-   * @param nodeReg data node registration
-   * @throws IOException
+   * @param nodeReg node registration
+   * @throws UnregisteredNodeException if the registration is invalid
    */
   void verifyRequest(NodeRegistration nodeReg) throws IOException {
     verifyVersion(nodeReg.getVersion());

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

@@ -496,7 +496,7 @@ public class NamenodeFsck {
       
       try {
         chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
-        targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
+        targetAddr = NetUtils.createSocketAddr(chosenNode.getXferAddr());
       }  catch (IOException ie) {
         if (failures >= DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT) {
           throw new IOException("Could not obtain block " + lblock);

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

@@ -260,14 +260,14 @@ class NamenodeJspHelper {
       // Find out common suffix. Should this be before or after the sort?
       String port_suffix = null;
       if (live.size() > 0) {
-        String name = live.get(0).getName();
+        String name = live.get(0).getXferAddr();
         int idx = name.indexOf(':');
         if (idx > 0) {
           port_suffix = name.substring(idx);
         }
 
         for (int i = 1; port_suffix != null && i < live.size(); i++) {
-          if (live.get(i).getName().endsWith(port_suffix) == false) {
+          if (live.get(i).getXferAddr().endsWith(port_suffix) == false) {
             port_suffix = null;
             break;
           }
@@ -404,7 +404,7 @@ class NamenodeJspHelper {
     final String nodeToRedirect;
     int redirectPort;
     if (datanode != null) {
-      nodeToRedirect = datanode.getHost();
+      nodeToRedirect = datanode.getIpAddr();
       redirectPort = datanode.getInfoPort();
     } else {
       nodeToRedirect = nn.getHttpAddress().getHostName();
@@ -466,14 +466,14 @@ class NamenodeJspHelper {
           + URLEncoder.encode("/", "UTF-8")
           + JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnaddr);
 
-      String name = d.getHostName() + ":" + d.getPort();
+      String name = d.getXferAddrWithHostname();
       if (!name.matches("\\d+\\.\\d+.\\d+\\.\\d+.*"))
         name = name.replaceAll("\\.[^.:]*", "");
       int idx = (suffix != null && name.endsWith(suffix)) ? name
           .indexOf(suffix) : -1;
 
-      out.print(rowTxt() + "<td class=\"name\"><a title=\"" + d.getHost() + ":"
-          + d.getPort() + "\" href=\"" + url + "\">"
+      out.print(rowTxt() + "<td class=\"name\"><a title=\"" + d.getXferAddr()
+          + "\" href=\"" + url + "\">"
           + ((idx > 0) ? name.substring(0, idx) : name) + "</a>"
           + ((alive) ? "" : "\n"));
     }
@@ -599,14 +599,14 @@ class NamenodeJspHelper {
       // Find out common suffix. Should this be before or after the sort?
       String port_suffix = null;
       if (live.size() > 0) {
-        String name = live.get(0).getName();
+        String name = live.get(0).getXferAddr();
         int idx = name.indexOf(':');
         if (idx > 0) {
           port_suffix = name.substring(idx);
         }
 
         for (int i = 1; port_suffix != null && i < live.size(); i++) {
-          if (live.get(i).getName().endsWith(port_suffix) == false) {
+          if (live.get(i).getXferAddr().endsWith(port_suffix) == false) {
             port_suffix = null;
             break;
           }

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java

@@ -80,9 +80,8 @@ public interface DatanodeProtocol {
    *
    * @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
    * @param registration datanode registration information
-   * @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains 
-   * new storageID if the datanode did not have one and
-   * registration ID for further communication.
+   * @return the given {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration} with
+   *  updated registration information
    */
   public DatanodeRegistration registerDatanode(DatanodeRegistration registration
       ) throws IOException;

+ 21 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java

@@ -49,8 +49,8 @@ implements Writable, NodeRegistration {
        });
   }
 
-  public StorageInfo storageInfo;
-  public ExportedBlockKeys exportedKeys;
+  private StorageInfo storageInfo;
+  private ExportedBlockKeys exportedKeys;
 
   /**
    * Default constructor.
@@ -62,8 +62,8 @@ implements Writable, NodeRegistration {
   /**
    * Create DatanodeRegistration
    */
-  public DatanodeRegistration(String nodeName) {
-    this(nodeName, new StorageInfo(), new ExportedBlockKeys());
+  public DatanodeRegistration(String ipAddr) {
+    this(ipAddr, new StorageInfo(), new ExportedBlockKeys());
   }
   
   public DatanodeRegistration(DatanodeID dn, StorageInfo info,
@@ -73,9 +73,9 @@ implements Writable, NodeRegistration {
     this.exportedKeys = keys;
   }
   
-  public DatanodeRegistration(String nodeName, StorageInfo info,
+  public DatanodeRegistration(String ipAddr, StorageInfo info,
       ExportedBlockKeys keys) {
-    super(nodeName);
+    super(ipAddr);
     this.storageInfo = info;
     this.exportedKeys = keys;
   }
@@ -83,7 +83,19 @@ implements Writable, NodeRegistration {
   public void setStorageInfo(StorageInfo storage) {
     this.storageInfo = new StorageInfo(storage);
   }
-  
+
+  public StorageInfo getStorageInfo() {
+    return storageInfo;
+  }
+
+  public void setExportedKeys(ExportedBlockKeys keys) {
+    this.exportedKeys = keys;
+  }
+
+  public ExportedBlockKeys getExportedKeys() {
+    return exportedKeys;
+  }
+
   @Override // NodeRegistration
   public int getVersion() {
     return storageInfo.getLayoutVersion();
@@ -96,13 +108,13 @@ implements Writable, NodeRegistration {
 
   @Override // NodeRegistration
   public String getAddress() {
-    return getName();
+    return getXferAddr();
   }
 
   @Override
   public String toString() {
     return getClass().getSimpleName()
-      + "(" + name
+      + "(" + ipAddr
       + ", storageID=" + storageID
       + ", infoPort=" + infoPort
       + ", ipcPort=" + ipcPort

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DisallowedDatanodeException.java

@@ -38,6 +38,6 @@ public class DisallowedDatanodeException extends IOException {
   private static final long serialVersionUID = 1L;
 
   public DisallowedDatanodeException(DatanodeID nodeID) {
-    super("Datanode denied communication with namenode: " + nodeID.getName());
+    super("Datanode denied communication with namenode: " + nodeID);
   }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 public interface NodeRegistration {
   /**
    * Get address of the server node.
-   * @return hostname:portNumber
+   * @return ipAddr:portNumber
    */
   public String getAddress();
 

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java

@@ -280,10 +280,11 @@ public class JsonUtil {
     }
 
     final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("name", datanodeinfo.getName());
+    m.put("ipAddr", datanodeinfo.getIpAddr());
+    m.put("hostName", datanodeinfo.getHostName());
     m.put("storageID", datanodeinfo.getStorageID());
+    m.put("xferPort", datanodeinfo.getXferPort());
     m.put("infoPort", datanodeinfo.getInfoPort());
-
     m.put("ipcPort", datanodeinfo.getIpcPort());
 
     m.put("capacity", datanodeinfo.getCapacity());
@@ -293,7 +294,6 @@ public class JsonUtil {
     m.put("lastUpdate", datanodeinfo.getLastUpdate());
     m.put("lastUpdate", datanodeinfo.getLastUpdate());
     m.put("xceiverCount", datanodeinfo.getXceiverCount());
     m.put("xceiverCount", datanodeinfo.getXceiverCount());
     m.put("networkLocation", datanodeinfo.getNetworkLocation());
     m.put("networkLocation", datanodeinfo.getNetworkLocation());
-    m.put("hostName", datanodeinfo.getHostName());
     m.put("adminState", datanodeinfo.getAdminState().name());
     m.put("adminState", datanodeinfo.getAdminState().name());
     return m;
     return m;
   }
   }
@@ -306,7 +306,9 @@ public class JsonUtil {
 
     return new DatanodeInfo(
         (String)m.get("name"),
+        (String)m.get("hostName"),
         (String)m.get("storageID"),
+        (int)(long)(Long)m.get("xferPort"),
         (int)(long)(Long)m.get("infoPort"),
         (int)(long)(Long)m.get("ipcPort"),
 
@@ -317,7 +319,6 @@ public class JsonUtil {
         (Long)m.get("lastUpdate"),
         (Long)m.get("lastUpdate"),
         (int)(long)(Long)m.get("xceiverCount"),
         (int)(long)(Long)m.get("xceiverCount"),
         (String)m.get("networkLocation"),
         (String)m.get("networkLocation"),
-        (String)m.get("hostName"),
         AdminStates.valueOf((String)m.get("adminState")));
         AdminStates.valueOf((String)m.get("adminState")));
   }
   }
 
 

+ 6 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/hdfs.proto

@@ -48,10 +48,12 @@ message BlockTokenIdentifierProto {
  * Identifies a Datanode
  */
 message DatanodeIDProto {
-  required string name = 1;      // hostname:portNumber
-  required string storageID = 2; // Unique storage id
-  required uint32 infoPort = 3;  // the port where the infoserver is running
-  required uint32 ipcPort = 4;   // the port where the ipc Server is running
+  required string ipAddr = 1;    // IP address
+  required string hostName = 2;  // hostname
+  required string storageID = 3; // unique storage id
+  required uint32 xferPort = 4;  // data streaming port
+  required uint32 infoPort = 5;  // info server port
+  required uint32 ipcPort = 6;   // ipc server port
 }
 
 /**
@@ -73,7 +75,6 @@ message DatanodeInfoProto {
   optional uint64 lastUpdate = 6 [default = 0];
   optional uint32 xceiverCount = 7 [default = 0];
   optional string location = 8;
-  optional string hostName = 9;
   enum AdminState {
     NORMAL = 0;
     DECOMMISSION_INPROGRESS = 1;

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java

@@ -143,7 +143,7 @@ public class BlockReaderTestUtil {
     Socket sock = null;
     ExtendedBlock block = testBlock.getBlock();
     DatanodeInfo[] nodes = testBlock.getLocations();
-    targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
+    targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
     sock = NetUtils.getDefaultSocketFactory(conf).createSocket();
     sock.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
     sock.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);
@@ -162,7 +162,7 @@ public class BlockReaderTestUtil {
    */
   public DataNode getDataNode(LocatedBlock testBlock) {
     DatanodeInfo[] nodes = testBlock.getLocations();
-    int ipcport = nodes[0].ipcPort;
+    int ipcport = nodes[0].getIpcPort();
     return cluster.getDataNode(ipcport);
   }
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -339,7 +339,7 @@ public class DFSTestUtil {
   }
 
   /*
-   * Wait up to 20s for the given DN (host:port) to be decommissioned.
+   * Wait up to 20s for the given DN (IP:port) to be decommissioned
    */
   public static void waitForDecommission(FileSystem fs, String name) 
       throws IOException, InterruptedException, TimeoutException {
@@ -351,7 +351,7 @@ public class DFSTestUtil {
       Thread.sleep(1000);
       DistributedFileSystem dfs = (DistributedFileSystem)fs;
       for (DatanodeInfo info : dfs.getDataNodeStats()) {
-        if (name.equals(info.getName())) {
+        if (name.equals(info.getXferAddr())) {
           dn = info;
         }
       }

+ 10 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -1041,9 +1041,9 @@ public class MiniDFSCluster {
       //      hadoop.security.token.service.use_ip=true
       //since the HDFS does things based on IP:port, we need to add the mapping
       //for IP:port to rackId
-      String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
+      String ipAddr = dn.getXferAddress().getAddress().getHostAddress();
       if (racks != null) {
-        int port = dn.getSelfAddr().getPort();
+        int port = dn.getXferAddress().getPort();
         LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
                             " to rack " + racks[i-curDatanodesNum]);
         StaticMapping.addNodeToRack(ipAddr + ":" + port,
@@ -1422,7 +1422,7 @@ public class MiniDFSCluster {
     DataNodeProperties dnprop = dataNodes.remove(i);
     DataNode dn = dnprop.datanode;
     LOG.info("MiniDFSCluster Stopping DataNode " +
-                       dn.getMachineName() +
+                       dn.getDisplayName() +
                        " from a total of " + (dataNodes.size() + 1) + 
                        " datanodes.");
     dn.shutdown();
@@ -1433,16 +1433,13 @@ public class MiniDFSCluster {
   /*
    * Shutdown a datanode by name.
    */
-  public synchronized DataNodeProperties stopDataNode(String name) {
+  public synchronized DataNodeProperties stopDataNode(String dnName) {
     int i;
     for (i = 0; i < dataNodes.size(); i++) {
       DataNode dn = dataNodes.get(i).datanode;
-      // get BP registration
-      DatanodeRegistration dnR = 
-        DataNodeTestUtils.getDNRegistrationByMachineName(dn, name);
-      LOG.info("for name=" + name + " found bp=" + dnR + 
-          "; with dnMn=" + dn.getMachineName());
-      if(dnR != null) {
+      LOG.info("DN name=" + dnName + " found DN=" + dn +
+          " with name=" + dn.getDisplayName());
+      if (dnName.equals(dn.getDatanodeId().getXferAddr())) {
         break;
       }
     }
@@ -1472,9 +1469,9 @@ public class MiniDFSCluster {
    String[] args = dnprop.dnArgs;
    Configuration newconf = new HdfsConfiguration(conf); // save cloned config
    if (keepPort) {
-      InetSocketAddress addr = dnprop.datanode.getSelfAddr();
-      conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":"
-          + addr.getPort());
+      InetSocketAddress addr = dnprop.datanode.getXferAddress();
+      conf.set(DFS_DATANODE_ADDRESS_KEY, 
+          addr.getAddress().getHostAddress() + ":" + addr.getPort());
    }
    dataNodes.add(new DataNodeProperties(DataNode.createDataNode(args, conf),
        newconf, args));
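The lookup convention implied by the stopDataNode change above is that callers now identify a DataNode by the "ip:port" string of its data-transfer socket rather than by its old registration name. A minimal sketch, assuming a running MiniDFSCluster like the ones in these tests (the local variable names are illustrative, not from the patch):

    DataNode dn = cluster.getDataNodes().get(0);
    // getXferAddr() yields the "ip:port" string the new stopDataNode(String) comparison expects
    String xferAddr = dn.getDatanodeId().getXferAddr();
    cluster.stopDataNode(xferAddr);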

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java

@@ -220,7 +220,7 @@ public class TestClientReportBadBlock {
      final DataNode dn = cluster.getDataNode(dninfo.getIpcPort());
      corruptBlock(block, dn);
      LOG.debug("Corrupted block " + block.getBlockName() + " on data node "
-          + dninfo.getName());
+          + dninfo);
 
    }
  }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java

@@ -158,7 +158,7 @@ public class TestConnCache {
            testFile.toString(), 0, FILE_SIZE)
        .getLocatedBlocks().get(0);
    DataNode dn = util.getDataNode(block);
-    InetSocketAddress dnAddr = dn.getSelfAddr();
+    InetSocketAddress dnAddr = dn.getXferAddress();
 
    // Make some sockets to the DN
    Socket[] dnSockets = new Socket[CACHE_SIZE];

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSAddressConfig.java

@@ -50,7 +50,7 @@ public class TestDFSAddressConfig extends TestCase {
    ArrayList<DataNode> dns = cluster.getDataNodes();
    DataNode dn = dns.get(0);
 
-    String selfSocketAddr = dn.getSelfAddr().toString();
+    String selfSocketAddr = dn.getXferAddress().toString();
    System.out.println("DN Self Socket Addr == " + selfSocketAddr);
    assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
 
@@ -75,7 +75,7 @@ public class TestDFSAddressConfig extends TestCase {
    dns = cluster.getDataNodes();
    dn = dns.get(0);
 
-    selfSocketAddr = dn.getSelfAddr().toString();
+    selfSocketAddr = dn.getXferAddress().toString();
    System.out.println("DN Self Socket Addr == " + selfSocketAddr);
    // assert that default self socket address is 127.0.0.1
    assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
@@ -100,7 +100,7 @@ public class TestDFSAddressConfig extends TestCase {
    dns = cluster.getDataNodes();
    dn = dns.get(0);
 
-    selfSocketAddr = dn.getSelfAddr().toString();
+    selfSocketAddr = dn.getXferAddress().toString();
    System.out.println("DN Self Socket Addr == " + selfSocketAddr);
    // assert that default self socket address is 0.0.0.0
    assertTrue(selfSocketAddr.contains("/0.0.0.0:"));

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -334,7 +334,7 @@ public class TestDFSClientRetries extends TestCase {
      LocatedBlock badLocatedBlock = new LocatedBlock(
        goodLocatedBlock.getBlock(),
        new DatanodeInfo[] {
-          new DatanodeInfo(new DatanodeID("255.255.255.255:234"))
+          new DatanodeInfo(new DatanodeID("255.255.255.255", 234))
        },
        goodLocatedBlock.getStartOffset(),
        false);
@@ -608,7 +608,7 @@ public class TestDFSClientRetries extends TestCase {
          cluster.getNameNodeRpc(), f, 0, Long.MAX_VALUE)
            .getLocatedBlocks();
      final DatanodeInfo first = locatedblocks.get(0).getLocations()[0];
-      cluster.stopDataNode(first.getName());
+      cluster.stopDataNode(first.getXferAddr());
 
      //get checksum again
      final FileChecksum cs2 = fs.getFileChecksum(p);
@@ -629,7 +629,7 @@ public class TestDFSClientRetries extends TestCase {
 
 
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    DatanodeID fakeDnId = new DatanodeID(
-        "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort());
+        "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
    
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);

+ 0 - 13
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

@@ -52,7 +52,6 @@ public class TestDFSUpgradeFromImage extends TestCase {
      .getLog(TestDFSUpgradeFromImage.class);
  private static File TEST_ROOT_DIR =
                      new File(MiniDFSCluster.getBaseDirectory());
-  private static final String HADOOP14_IMAGE = "hadoop-14-dfs-dir.tgz";
  private static final String HADOOP_DFS_DIR_TXT = "hadoop-dfs-dir.txt";
  private static final String HADOOP22_IMAGE = "hadoop-22-dfs-dir.tgz";
  
@@ -68,10 +67,6 @@ public class TestDFSUpgradeFromImage extends TestCase {
   
   
  boolean printChecksum = false;
  
-  public void unpackStorage() throws IOException {
-    unpackStorage(HADOOP14_IMAGE);
-  }
-
  private void unpackStorage(String tarFileName)
      throws IOException {
    String tarFile = System.getProperty("test.cache.data", "build/test/cache")
@@ -227,14 +222,6 @@ public class TestDFSUpgradeFromImage extends TestCase {
    }
  }
  
-  /**
-   * Test upgrade from an 0.14 image
-   */
-  public void testUpgradeFromRel14Image() throws IOException {
-    unpackStorage();
-    upgradeAndVerify();
-  }
-  
  /**
   * Test upgrade from 0.22 image
   */

+ 3 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java

@@ -128,8 +128,7 @@ public class TestDataTransferProtocol extends TestCase {
       
       
      if (eofExpected) {
        throw new IOException("Did not recieve IOException when an exception " +
-                              "is expected while reading from " + 
-                              datanode.getName());
+                              "is expected while reading from " + datanode); 
      }
      
      byte[] needed = recvBuf.toByteArray();
@@ -215,7 +214,7 @@ public class TestDataTransferProtocol extends TestCase {
      String poolId = cluster.getNamesystem().getBlockPoolId(); 
      datanode = DataNodeTestUtils.getDNRegistrationForBP(
          cluster.getDataNodes().get(0), poolId);
-      dnAddr = NetUtils.createSocketAddr(datanode.getName());
+      dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
      FileSystem fileSys = cluster.getFileSystem();
 
      /* Test writing to finalized replicas */
@@ -349,7 +348,7 @@ public class TestDataTransferProtocol extends TestCase {
                 new InetSocketAddress("localhost", cluster.getNameNodePort()),
                 conf);                
    datanode = dfsClient.datanodeReport(DatanodeReportType.LIVE)[0];
-    dnAddr = NetUtils.createSocketAddr(datanode.getName());
+    dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
    FileSystem fileSys = cluster.getFileSystem();
    
    int fileLen = Math.min(conf.getInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096), 4096);

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -269,7 +269,7 @@ public class TestDatanodeBlockScanner extends TestCase {
      if (corruptReplica(block, i)) {
        corruptReplicasDNIDs[j++] = i;
        LOG.info("successfully corrupted block " + block + " on node " 
-                 + i + " " + cluster.getDataNodes().get(i).getSelfAddr());
+                 + i + " " + cluster.getDataNodes().get(i).getDisplayName());
      }
    }
    
@@ -281,7 +281,7 @@ public class TestDatanodeBlockScanner extends TestCase {
    for (int i = numCorruptReplicas - 1; i >= 0 ; i--) {
      LOG.info("restarting node with corrupt replica: position " 
          + i + " node " + corruptReplicasDNIDs[i] + " " 
-          + cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getSelfAddr());
+          + cluster.getDataNodes().get(corruptReplicasDNIDs[i]).getDisplayName());
      cluster.restartDataNode(corruptReplicasDNIDs[i]);
    }
 
@@ -343,7 +343,7 @@ public class TestDatanodeBlockScanner extends TestCase {
      if (!changeReplicaLength(block, 0, -1)) {
        throw new IOException(
            "failed to find or change length of replica on node 0 "
-            + cluster.getDataNodes().get(0).getSelfAddr());
+            + cluster.getDataNodes().get(0).getDisplayName());
      }      
    } finally {
      cluster.shutdown();

+ 2 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeDeath.java

@@ -389,9 +389,8 @@ public class TestDatanodeDeath extends TestCase {
        cluster.stopDataNode(victim);
      } else {
        int victim = datanodeToKill;
-        System.out.println("SimpleTest stopping datanode " +
-                            targets[victim].getName());
-        cluster.stopDataNode(targets[victim].getName());
+        System.out.println("SimpleTest stopping datanode " + targets[victim]);
+        cluster.stopDataNode(targets[victim].getXferAddr());
      }
      System.out.println("SimpleTest stopping datanode complete");
 

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java

@@ -151,27 +151,27 @@ public class TestDecommission {
      int hasdown = 0;
      DatanodeInfo[] nodes = blk.getLocations();
      for (int j = 0; j < nodes.length; j++) { // for each replica
-        if (isNodeDown && nodes[j].getName().equals(downnode)) {
+        if (isNodeDown && nodes[j].getXferAddr().equals(downnode)) {
          hasdown++;
          //Downnode must actually be decommissioned
          if (!nodes[j].isDecommissioned()) {
            return "For block " + blk.getBlock() + " replica on " +
-              nodes[j].getName() + " is given as downnode, " +
+              nodes[j] + " is given as downnode, " +
              "but is not decommissioned";
          }
          //Decommissioned node (if any) should only be last node in list.
          if (j != nodes.length - 1) {
            return "For block " + blk.getBlock() + " decommissioned node "
-              + nodes[j].getName() + " was not last node in list: "
+              + nodes[j] + " was not last node in list: "
              + (j + 1) + " of " + nodes.length;
          }
          LOG.info("Block " + blk.getBlock() + " replica on " +
-            nodes[j].getName() + " is decommissioned.");
+            nodes[j] + " is decommissioned.");
        } else {
          //Non-downnodes must not be decommissioned
          if (nodes[j].isDecommissioned()) {
            return "For block " + blk.getBlock() + " replica on " +
-              nodes[j].getName() + " is unexpectedly decommissioned";
+              nodes[j] + " is unexpectedly decommissioned";
          }
        }
      }
@@ -215,7 +215,7 @@ public class TestDecommission {
        found = true;
      }
    }
-    String nodename = info[index].getName();
+    String nodename = info[index].getXferAddr();
    LOG.info("Decommissioning node: " + nodename);
 
    // write nodename into the exclude file.
@@ -236,7 +236,7 @@ public class TestDecommission {
 
 
  /* stop decommission of the datanode and wait for each to reach the NORMAL state */
  private void recomissionNode(DatanodeInfo decommissionedNode) throws IOException {
-    LOG.info("Recommissioning node: " + decommissionedNode.getName());
+    LOG.info("Recommissioning node: " + decommissionedNode);
    writeConfigFile(excludeFile, null);
    refreshNodes(cluster.getNamesystem(), conf);
    waitNodeState(decommissionedNode, AdminStates.NORMAL);
@@ -373,7 +373,7 @@ public class TestDecommission {
        DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
        assertEquals("All datanodes must be alive", numDatanodes, 
            client.datanodeReport(DatanodeReportType.LIVE).length);
-        assertNull(checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes));
+        assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(), numDatanodes));
        cleanupFile(fileSys, file1);
      }
    }
@@ -414,7 +414,7 @@ public class TestDecommission {
      DFSClient client = getDfsClient(cluster.getNameNode(i), conf);
      assertEquals("All datanodes must be alive", numDatanodes, 
          client.datanodeReport(DatanodeReportType.LIVE).length);
-      assertNull(checkFile(fileSys, file1, replicas, decomNode.getName(), numDatanodes));
+      assertNull(checkFile(fileSys, file1, replicas, decomNode.getXferAddr(), numDatanodes));
 
      // stop decommission and check if the new replicas are removed
      recomissionNode(decomNode);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -844,7 +844,7 @@ public class TestFileCreation extends junit.framework.TestCase {
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);
      int successcount = 0;
      for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
-        DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
+        DataNode datanode = cluster.getDataNode(datanodeinfo.getIpcPort());
        ExtendedBlock blk = locatedblock.getBlock();
        Block b = DataNodeTestUtils.getFSDataset(datanode).getStoredBlock(
            blk.getBlockPoolId(), blk.getBlockId());

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java

@@ -147,7 +147,7 @@ public class TestHftpFileSystem {
    // if we were redirected to the right DN.
    BlockLocation[] locations = 
        hdfs.getFileBlockLocations(path, 0, 10);
-    String locationName = locations[0].getNames()[0];
+    String xferAddr = locations[0].getNames()[0];
 
    // Connect to the NN to get redirected
    URL u = hftpFs.getNamenodeURL(
@@ -164,7 +164,7 @@ public class TestHftpFileSystem {
    for (DataNode node : cluster.getDataNodes()) {
      DatanodeRegistration dnR = 
        DataNodeTestUtils.getDNRegistrationForBP(node, blockPoolId);
-      if (dnR.getName().equals(locationName)) {
+      if (dnR.getXferAddr().equals(xferAddr)) {
        checked = true;
        assertEquals(dnR.getInfoPort(), conn.getURL().getPort());
      }

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestIsMethodSupported.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.net.NetUtils;
@@ -58,8 +59,9 @@ public class TestIsMethodSupported {
    cluster = (new MiniDFSCluster.Builder(conf))
        .numDataNodes(1).build();
    nnAddress = cluster.getNameNode().getNameNodeAddress();
-    dnAddress = new InetSocketAddress(cluster.getDataNodes().get(0)
-        .getDatanodeId().getHost(), cluster.getDataNodes().get(0).getIpcPort());
+    DataNode dn = cluster.getDataNodes().get(0);
+    dnAddress = new InetSocketAddress(dn.getDatanodeId().getIpAddr(),
+                                      dn.getIpcPort());
  }
 
  @AfterClass

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplication.java

@@ -117,7 +117,7 @@ public class TestReplication extends TestCase {
      isOnSameRack = false;
      isNotOnSameRack = false;
      for (int i = 0; i < datanodes.length-1; i++) {
-        LOG.info("datanode "+ i + ": "+ datanodes[i].getName());
+        LOG.info("datanode "+ i + ": "+ datanodes[i]);
        boolean onRack = false;
        for( int j=i+1; j<datanodes.length; j++) {
           if( datanodes[i].getNetworkLocation().equals(

+ 17 - 17
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java

@@ -130,19 +130,19 @@ public class TestPBHelper {
 
 
  @Test
  public void testConvertDatanodeID() {
-    DatanodeID dn = new DatanodeID("node", "sid", 1, 2);
+    DatanodeID dn = new DatanodeID("node", "node", "sid", 1, 2, 3);
    DatanodeIDProto dnProto = PBHelper.convert(dn);
    DatanodeID dn2 = PBHelper.convert(dnProto);
    compare(dn, dn2);
  }
  
  void compare(DatanodeID dn, DatanodeID dn2) {
-    assertEquals(dn.getHost(), dn2.getHost());
+    assertEquals(dn.getIpAddr(), dn2.getIpAddr());
+    assertEquals(dn.getHostName(), dn2.getHostName());
+    assertEquals(dn.getStorageID(), dn2.getStorageID());
+    assertEquals(dn.getXferPort(), dn2.getXferPort());
    assertEquals(dn.getInfoPort(), dn2.getInfoPort());
    assertEquals(dn.getIpcPort(), dn2.getIpcPort());
-    assertEquals(dn.getName(), dn2.getName());
-    assertEquals(dn.getPort(), dn2.getPort());
-    assertEquals(dn.getStorageID(), dn2.getStorageID());
  }
 
  @Test
@@ -279,8 +279,8 @@ public class TestPBHelper {
    return new ExtendedBlock("bpid", blkid, 100, 2);
  }
  
-  public DatanodeInfo getDNInfo() {
-    return new DatanodeInfo(new DatanodeID("node", "sid", 1, 2));
+  private DatanodeInfo getDNInfo() {
+    return new DatanodeInfo(new DatanodeID("node", "node", "sid", 0, 1, 2));
  }
  
  private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
@@ -291,7 +291,7 @@ public class TestPBHelper {
      assertEquals(dn1.getDatanodeReport(), dn2.getDatanodeReport());
      assertEquals(dn1.getDfsUsed(), dn1.getDfsUsed());
      assertEquals(dn1.getDfsUsedPercent(), dn1.getDfsUsedPercent());
-      assertEquals(dn1.getHost(), dn2.getHost());
+      assertEquals(dn1.getIpAddr(), dn2.getIpAddr());
      assertEquals(dn1.getHostName(), dn2.getHostName());
      assertEquals(dn1.getInfoPort(), dn2.getInfoPort());
      assertEquals(dn1.getIpcPort(), dn2.getIpcPort());
@@ -400,12 +400,12 @@ public class TestPBHelper {
  @Test
  public void testConvertLocatedBlock() {
    DatanodeInfo [] dnInfos = new DatanodeInfo[3];
-    dnInfos[0] = new DatanodeInfo("host0", "0", 5000, 5001, 20000, 10001, 9999,
-        59, 69, 32, "local", "host0", AdminStates.DECOMMISSION_INPROGRESS);
-    dnInfos[1] = new DatanodeInfo("host1", "1", 5000, 5001, 20000, 10001, 9999,
-        59, 69, 32, "local", "host1", AdminStates.DECOMMISSIONED);
-    dnInfos[2] = new DatanodeInfo("host2", "2", 5000, 5001, 20000, 10001, 9999,
-        59, 69, 32, "local", "host1", AdminStates.NORMAL);
+    dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 5002, 20000, 10001, 9999,
+        59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS);
+    dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 5002, 20000, 10001, 9999,
+        59, 69, 32, "local", AdminStates.DECOMMISSIONED);
+    dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 5002, 20000, 10001, 9999,
+        59, 69, 32, "local", AdminStates.NORMAL);
    LocatedBlock lb = new LocatedBlock(
        new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
    LocatedBlockProto lbProto = PBHelper.convert(lb);
@@ -423,7 +423,7 @@ public class TestPBHelper {
   
   
  @Test
  public void testConvertDatanodeRegistration() {
-    DatanodeID dnId = new DatanodeID("host", "xyz", 1, 0);
+    DatanodeID dnId = new DatanodeID("host", "host", "xyz", 0, 1, 0);
    BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
    ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
        getBlockKey(1), keys);
@@ -431,8 +431,8 @@ public class TestPBHelper {
        new StorageInfo(), expKeys);
    DatanodeRegistrationProto proto = PBHelper.convert(reg);
    DatanodeRegistration reg2 = PBHelper.convert(proto);
-    compare(reg.storageInfo, reg2.storageInfo);
-    compare(reg.exportedKeys, reg2.exportedKeys);
+    compare(reg.getStorageInfo(), reg2.getStorageInfo());
+    compare(reg.getExportedKeys(), reg2.getExportedKeys());
    compare((DatanodeID)reg, (DatanodeID)reg2);
  }
  

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java

@@ -279,8 +279,8 @@ public class TestBlockToken {
    server.start();
 
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
-    DatanodeID fakeDnId = new DatanodeID("localhost:" + addr.getPort(),
-        "fake-storage", 0, addr.getPort());
+    DatanodeID fakeDnId = new DatanodeID("localhost",
+        "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
 
    ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
    LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
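From the call sites in this patch (TestPBHelper, TestDFSClientRetries, and the hunk above), the six-argument DatanodeID constructor appears to take (IP address, hostname, storage ID, xfer port, info port, IPC port); those are also the accessors the rewritten compare() exercises. A hedged sketch with made-up values, inferred from these hunks rather than from DatanodeID's own documentation:

    DatanodeID id = new DatanodeID(
        "127.0.0.1",     // IP address
        "localhost",     // hostname
        "storage-1234",  // storage ID
        50010,           // xfer (data streaming) port
        50075,           // info (HTTP) port
        50020);          // IPC port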

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java

@@ -165,7 +165,7 @@ public class BlockManagerTestUtil {
      DatanodeDescriptor[] dnds = hbm.getDatanodes();
      DatanodeDescriptor theDND = null;
      for (DatanodeDescriptor dnd : dnds) {
-        if (dnd.getName().equals(dnName)) {
+        if (dnd.getXferAddr().equals(dnName)) {
          theDND = dnd;
        }
      }

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java

@@ -48,12 +48,12 @@ import com.google.common.collect.Lists;
 
 
 public class TestBlockManager {
   private final List<DatanodeDescriptor> nodes = ImmutableList.of( 
-      new DatanodeDescriptor(new DatanodeID("h1:5020"), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h2:5020"), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h3:5020"), "/rackA"),
-      new DatanodeDescriptor(new DatanodeID("h4:5020"), "/rackB"),
-      new DatanodeDescriptor(new DatanodeID("h5:5020"), "/rackB"),
-      new DatanodeDescriptor(new DatanodeID("h6:5020"), "/rackB")
+      new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
+      new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
+      new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
+      new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackB"),
+      new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackB"),
+      new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackB")
     );
   private final List<DatanodeDescriptor> rackA = nodes.subList(0, 3);
   private final List<DatanodeDescriptor> rackB = nodes.subList(3, 6);
@@ -272,7 +272,7 @@ public class TestBlockManager {
 
 
    // the block is still under-replicated. Add a new node. This should allow
    // the third off-rack replica.
-    DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7:5020"), "/rackC");
+    DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7", 100), "/rackC");
    addNodes(ImmutableList.of(rackCNode));
    try {
      DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java

@@ -137,7 +137,7 @@ public class TestBlockTokenWithDFS {
    ExtendedBlock block = lblock.getBlock();
    try {
      DatanodeInfo[] nodes = lblock.getLocations();
-      targetAddr = NetUtils.createSocketAddr(nodes[0].getName());
+      targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
      s = NetUtils.getDefaultSocketFactory(conf).createSocket();
      s.connect(targetAddr, HdfsServerConstants.READ_TIMEOUT);
      s.setSoTimeout(HdfsServerConstants.READ_TIMEOUT);

+ 19 - 32
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java

@@ -28,13 +28,13 @@ import org.junit.Test;
 public class TestHost2NodesMap {
   private Host2NodesMap map = new Host2NodesMap();
   private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
-    new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h3:5030"), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("ip2", "h1", "", 5020, -1, -1), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5020, -1, -1), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5030, -1, -1), "/d1/r2"),
   };
   private final DatanodeDescriptor NULL_NODE = null; 
-  private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3:5040"),
+  private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3", 5040),
       "/d1/r4");
 
   @Before
@@ -56,24 +56,11 @@ public class TestHost2NodesMap {
 
 
   @Test
   public void testGetDatanodeByHost() throws Exception {
-    assertTrue(map.getDatanodeByHost("h1")==dataNodes[0]);
-    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
-    DatanodeDescriptor node = map.getDatanodeByHost("h3");
+    assertTrue(map.getDatanodeByHost("ip1")==dataNodes[0]);
+    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+    DatanodeDescriptor node = map.getDatanodeByHost("ip3");
     assertTrue(node==dataNodes[2] || node==dataNodes[3]);
-    assertTrue(null==map.getDatanodeByHost("h4"));
-  }
-
-  @Test
-  public void testGetDatanodeByName() throws Exception {
-    assertTrue(map.getDatanodeByName("h1:5020")==dataNodes[0]);
-    assertTrue(map.getDatanodeByName("h1:5030")==null);
-    assertTrue(map.getDatanodeByName("h2:5020")==dataNodes[1]);
-    assertTrue(map.getDatanodeByName("h2:5030")==null);
-    assertTrue(map.getDatanodeByName("h3:5020")==dataNodes[2]);
-    assertTrue(map.getDatanodeByName("h3:5030")==dataNodes[3]);
-    assertTrue(map.getDatanodeByName("h3:5040")==null);
-    assertTrue(map.getDatanodeByName("h4")==null);
-    assertTrue(map.getDatanodeByName(null)==null);
+    assertTrue(null==map.getDatanodeByHost("ip4"));
   }
 
   @Test
@@ -81,21 +68,21 @@ public class TestHost2NodesMap {
    assertFalse(map.remove(NODE));
    
    assertTrue(map.remove(dataNodes[0]));
-    assertTrue(map.getDatanodeByHost("h1")==null);
-    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
-    DatanodeDescriptor node = map.getDatanodeByHost("h3");
+    assertTrue(map.getDatanodeByHost("ip1")==null);
+    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+    DatanodeDescriptor node = map.getDatanodeByHost("ip3");
    assertTrue(node==dataNodes[2] || node==dataNodes[3]);
-    assertTrue(null==map.getDatanodeByHost("h4"));
+    assertTrue(null==map.getDatanodeByHost("ip4"));
    
    assertTrue(map.remove(dataNodes[2]));
-    assertTrue(map.getDatanodeByHost("h1")==null);
-    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
-    assertTrue(map.getDatanodeByHost("h3")==dataNodes[3]);
+    assertTrue(map.getDatanodeByHost("ip1")==null);
+    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+    assertTrue(map.getDatanodeByHost("ip3")==dataNodes[3]);
    
    assertTrue(map.remove(dataNodes[3]));
-    assertTrue(map.getDatanodeByHost("h1")==null);
-    assertTrue(map.getDatanodeByHost("h2")==dataNodes[1]);
-    assertTrue(map.getDatanodeByHost("h3")==null);
+    assertTrue(map.getDatanodeByHost("ip1")==null);
+    assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
+    assertTrue(map.getDatanodeByHost("ip3")==null);
    
    assertFalse(map.remove(NULL_NODE));
    assertTrue(map.remove(dataNodes[1]));
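As the rewritten assertions suggest, Host2NodesMap now appears to be keyed by the datanode's IP string rather than the old "host:port" name, and the getDatanodeByName lookup is removed. A small hedged sketch using the fake descriptors declared in this test:

    DatanodeDescriptor byIp = map.getDatanodeByHost("ip1");       // resolves under the new keying
    DatanodeDescriptor byName = map.getDatanodeByHost("h1:5020"); // expected to be null now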

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java

@@ -78,11 +78,11 @@ public class TestNodeCount extends TestCase {
       
       
      // bring down first datanode
      DatanodeDescriptor datanode = datanodes[0];
-      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getName());
+      DataNodeProperties dnprop = cluster.stopDataNode(datanode.getXferAddr());
      
      // make sure that NN detects that the datanode is down
      BlockManagerTestUtil.noticeDeadDatanode(
-          cluster.getNameNode(), datanode.getName());
+          cluster.getNameNode(), datanode.getXferAddr());
      
      // the block will be replicated
      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);
@@ -112,10 +112,10 @@ public class TestNodeCount extends TestCase {
      assertTrue(nonExcessDN!=null);
      
      // bring down non excessive datanode
-      dnprop = cluster.stopDataNode(nonExcessDN.getName());
+      dnprop = cluster.stopDataNode(nonExcessDN.getXferAddr());
      // make sure that NN detects that the datanode is down
      BlockManagerTestUtil.noticeDeadDatanode(
-          cluster.getNameNode(), nonExcessDN.getName());
+          cluster.getNameNode(), nonExcessDN.getXferAddr());
 
      // The block should be replicated
      initializeTimeout(TIMEOUT);

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java

@@ -91,9 +91,9 @@ public class TestOverReplicatedBlocks extends TestCase {
        synchronized(hm) {
          // set live datanode's remaining space to be 0 
          // so they will be chosen to be deleted when over-replication occurs
-          String corruptMachineName = corruptDataNode.getName();
+          String corruptMachineName = corruptDataNode.getXferAddr();
          for (DatanodeDescriptor datanode : hm.getDatanodes()) {
-            if (!corruptMachineName.equals(datanode.getName())) {
+            if (!corruptMachineName.equals(datanode.getXferAddr())) {
              datanode.updateHeartbeat(100L, 100L, 0L, 100L, 0, 0);
            }
          }

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java

@@ -40,7 +40,7 @@ public class TestPendingDataNodeMessages {
  private final Block block2Gs1 = new Block(2, 0, 1);
  
  private final DatanodeDescriptor fakeDN = new DatanodeDescriptor(
-      new DatanodeID("fake"));
+      new DatanodeID("fake", 100));
  
  @Test
  public void testQueues() {
@@ -56,8 +56,8 @@ public class TestPendingDataNodeMessages {
    Queue<ReportedBlockInfo> q =
      msgs.takeBlockQueue(block1Gs2DifferentInstance);
    assertEquals(
-        "ReportedBlockInfo [block=blk_1_1, dn=fake, reportedState=FINALIZED]," +
-        "ReportedBlockInfo [block=blk_1_2, dn=fake, reportedState=FINALIZED]",
+        "ReportedBlockInfo [block=blk_1_1, dn=fake:100, reportedState=FINALIZED]," +
+        "ReportedBlockInfo [block=blk_1_2, dn=fake:100, reportedState=FINALIZED]",
        Joiner.on(",").join(q));
    assertEquals(0, msgs.count());
    

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

@@ -52,16 +52,16 @@ public class TestReplicationPolicy {
  private static final String filename = "/dummyfile.txt";
  private static final DatanodeDescriptor dataNodes[] = 
    new DatanodeDescriptor[] {
-      new DatanodeDescriptor(new DatanodeID("h1:5020"), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h2:5020"), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h3:5020"), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h4:5020"), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h5:5020"), "/d2/r3"),
-      new DatanodeDescriptor(new DatanodeID("h6:5020"), "/d2/r3")
+      new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
+      new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
+      new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d2/r3"),
+      new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3")
    };
   
  private final static DatanodeDescriptor NODE = 
-    new DatanodeDescriptor(new DatanodeID("h7:5020"), "/d2/r4");
+    new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4");
  
  static {
    try {
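The two-argument DatanodeID(host, port) form used throughout these topology-only tests appears to be a convenience constructor where the first argument doubles as the node's address string and the second as its xfer port. A hedged sketch of what that implies for the rest of this patch:

    DatanodeDescriptor d =
        new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4");
    // d.getXferAddr() would then read "h7:5020", the same style of string the
    // tests elsewhere in this patch compare against via getXferAddr().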

+ 0 - 267
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java

@@ -1,267 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.hadoop.hdfs.server.common;
-
-import static org.apache.hadoop.hdfs.protocol.HdfsConstants.LAYOUT_VERSION;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.TestDFSUpgradeFromImage;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
-import org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode;
-import org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode;
-import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
-import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.test.GenericTestUtils;
-
-import org.junit.Test;
-import static org.junit.Assert.*;
-
-/**
- */
-public class TestDistributedUpgrade {
-  private static final Log LOG = LogFactory.getLog(TestDistributedUpgrade.class);
-  private Configuration conf;
-  private int testCounter = 0;
-  private MiniDFSCluster cluster = null;
-  private String clusterId = "testClsterId";
-    
-  /**
-   * Writes an INFO log message containing the parameters.
-   */
-  void log(String label, int numDirs) {
-    LOG.info("============================================================");
-    LOG.info("***TEST " + (testCounter++) + "*** " 
-             + label + ":"
-             + " numDirs="+numDirs);
-  }
-  
-  /**
-   * Attempts to start a NameNode with the given operation.  Starting
-   * the NameNode should throw an exception.
-   */
-  void startNameNodeShouldFail(StartupOption operation,
-      String exceptionSubstring) {
-    try {
-      //cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).startupOption(operation).build(); // should fail
-      // we set manage dirs to true as NN has to start from untar'ed image with 
-      // nn dirs set to name1 and name2
-      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-                                              .format(false)
-                                              .clusterId(clusterId)
-                                              .startupOption(operation)
-                                              .build(); // should fail
-      throw new AssertionError("NameNode should have failed to start");
-    } catch (Exception expected) {
-      GenericTestUtils.assertExceptionContains(
-          exceptionSubstring, expected);
-    }
-  }
-  
-  /**
-   * Attempts to start a DataNode with the given operation.  Starting
-   * the DataNode should throw an exception.
-   */
-  void startDataNodeShouldFail(StartupOption operation) {
-    try {
-      cluster.startDataNodes(conf, 1, false, operation, null); // should fail
-      throw new AssertionError("DataNode should have failed to start");
-    } catch (Exception expected) {
-      // expected
-      assertFalse(cluster.isDataNodeUp());
-    }
-  }
- 
-  /**
-   */
-  @Test(timeout=300000) // 5 min timeout
-  public void testDistributedUpgrade() throws Exception {
-    int numDirs = 1;
-    TestDFSUpgradeFromImage testImg = new TestDFSUpgradeFromImage();
-    testImg.unpackStorage();
-    int numDNs = testImg.numDataNodes;
-    
-    // register new upgrade objects (ignore all existing)
-    UpgradeObjectCollection.initialize();
-    UpgradeObjectCollection.registerUpgrade(new UO_Datanode1());
-    UpgradeObjectCollection.registerUpgrade(new UO_Namenode1());
-    UpgradeObjectCollection.registerUpgrade(new UO_Datanode2());
-    UpgradeObjectCollection.registerUpgrade(new UO_Namenode2());
-    UpgradeObjectCollection.registerUpgrade(new UO_Datanode3());
-    UpgradeObjectCollection.registerUpgrade(new UO_Namenode3());
-
-    conf = new HdfsConfiguration();
-    if (System.getProperty("test.build.data") == null) { // to test to be run outside of ant
-      System.setProperty("test.build.data", "build/test/data");
-    }
-    conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // block scanning off
-
-    log("NameNode start in regular mode when dustributed upgrade is required", numDirs);
-    startNameNodeShouldFail(StartupOption.REGULAR, "contains an old layout version");
-
-    log("Start NameNode only distributed upgrade", numDirs);
-    // cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
-    // .startupOption(StartupOption.UPGRADE).build();
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
-                                              .format(false)
-                                              .clusterId(clusterId)
-                                              .startupOption(StartupOption.UPGRADE)
-                                              .build();
-    cluster.shutdown();
-
-    log("NameNode start in regular mode when dustributed upgrade has been started", numDirs);
-    startNameNodeShouldFail(StartupOption.REGULAR,
-        "Previous distributed upgrade was not completed");
-
-    log("NameNode rollback to the old version that require a dustributed upgrade", numDirs);
-    startNameNodeShouldFail(StartupOption.ROLLBACK,
-        "Cannot rollback to storage version -7 using this version");
-
-    log("Normal distributed upgrade for the cluster", numDirs);
-    cluster = new MiniDFSCluster.Builder(conf)
-                                .numDataNodes(numDNs)
-                                .format(false)
-                                .clusterId(clusterId)
-                                .startupOption(StartupOption.UPGRADE)
-                                .build();
-    DFSAdmin dfsAdmin = new DFSAdmin();
-    dfsAdmin.setConf(conf);
-    dfsAdmin.run(new String[] {"-safemode", "wait"});
-    cluster.shutdown();
-
-    // it should be ok to start in regular mode
-    log("NameCluster regular startup after the upgrade", numDirs);
-    cluster = new MiniDFSCluster.Builder(conf)
-                                .numDataNodes(numDNs)
-                                .clusterId(clusterId)
-                                .format(false)
-                                .startupOption(StartupOption.REGULAR)
-                                .build();
-
-    cluster.waitActive();
-    cluster.shutdown();
-  }
-
-  public static void main(String[] args) throws Exception {
-    new TestDistributedUpgrade().testDistributedUpgrade();
-    LOG.info("=== DONE ===");
-  }
-}
-
-/**
- * Upgrade object for data-node
- */
-class UO_Datanode extends UpgradeObjectDatanode {
-  int version;
-
-  UO_Datanode(int v) {
-    this.status = (short)0;
-    version = v;
-  }
-
-  public int getVersion() {
-    return version;
-  }
-
-  public void doUpgrade() throws IOException {
-    this.status = (short)100;
-    DatanodeProtocol nn = getNamenode();
-    nn.processUpgradeCommand(
-        new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS, 
-            getVersion(), getUpgradeStatus()));
-  }
-
-  public UpgradeCommand startUpgrade() throws IOException {
-    return null;
-  }
-}
-
-/**
- * Upgrade object for name-node
- */
-class UO_Namenode extends UpgradeObjectNamenode {
-  int version;
-
-  UO_Namenode(int v) {
-    status = (short)0;
-    version = v;
-  }
-
-  public int getVersion() {
-    return version;
-  }
-
-  synchronized public UpgradeCommand processUpgradeCommand(
-                                  UpgradeCommand command) throws IOException {
-    switch(command.getAction()) {
-      case UpgradeCommand.UC_ACTION_REPORT_STATUS:
-        this.status += command.getCurrentStatus()/8;  // 4 reports needed
-        break;
-      default:
-        this.status++;
-    }
-    return null;
-  }
-
-  public UpgradeCommand completeUpgrade() throws IOException {
-    return null;
-  }
-}
-
-class UO_Datanode1 extends UO_Datanode {
-  UO_Datanode1() {
-    super(LAYOUT_VERSION+1);
-  }
-}
-
-class UO_Namenode1 extends UO_Namenode {
-  UO_Namenode1() {
-    super(LAYOUT_VERSION+1);
-  }
-}
-
-class UO_Datanode2 extends UO_Datanode {
-  UO_Datanode2() {
-    super(LAYOUT_VERSION+2);
-  }
-}
-
-class UO_Namenode2 extends UO_Namenode {
-  UO_Namenode2() {
-    super(LAYOUT_VERSION+2);
-  }
-}
-
-class UO_Datanode3 extends UO_Datanode {
-  UO_Datanode3() {
-    super(LAYOUT_VERSION+3);
-  }
-}
-
-class UO_Namenode3 extends UO_Namenode {
-  UO_Namenode3() {
-    super(LAYOUT_VERSION+3);
-  }
-}

+ 1 - 6
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java

@@ -36,12 +36,7 @@ import com.google.common.base.Preconditions;
  * Utility class for accessing package-private DataNode information during tests.
  *
  */
-public class DataNodeTestUtils {
-  public static DatanodeRegistration 
-  getDNRegistrationByMachineName(DataNode dn, String mName) {
-    return dn.getDNRegistrationByMachineName(mName);
-  }
-  
+public class DataNodeTestUtils {  
   public static DatanodeRegistration 
   getDNRegistrationForBP(DataNode dn, String bpid) throws IOException {
     return dn.getDNRegistrationForBP(bpid);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

@@ -383,7 +383,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   public SimulatedFSDataset(DataNode datanode, DataStorage storage,
       Configuration conf) {
     if (storage != null) {
-      storage.createStorageID(datanode.getPort());
+      storage.createStorageID(datanode.getXferPort());
       this.storageId = storage.getStorageID();
     } else {
       this.storageId = "unknownStorageId" + new Random().nextInt();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockRecovery.java

@@ -197,9 +197,9 @@ public class TestBlockRecovery {
         locs, RECOVERY_ID);
         locs, RECOVERY_ID);
     ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
     ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
     BlockRecord record1 = new BlockRecord(
     BlockRecord record1 = new BlockRecord(
-        new DatanodeID("xx", "yy", 44, 55), dn1, replica1);
+        new DatanodeID("xx", "yy", "zz", 1, 2, 3), dn1, replica1);
     BlockRecord record2 = new BlockRecord(
-        new DatanodeID("aa", "bb", 11, 22), dn2, replica2);
+        new DatanodeID("aa", "bb", "cc", 1, 2, 3), dn2, replica2);
     syncList.add(record1);
     syncList.add(record2);
     
@@ -402,7 +402,7 @@ public class TestBlockRecovery {
   private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
     Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
     DatanodeInfo mockOtherDN = new DatanodeInfo(
-        new DatanodeID("127.0.0.1", "storage-1234", 0, 0));
+        new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0, 0));
     DatanodeInfo[] locs = new DatanodeInfo[] {
         new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
         mockOtherDN };
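
Note: the hunks above move the tests to the new six-argument DatanodeID constructor. A minimal sketch of that pattern, assuming (based only on the replacement values in this diff) that the arguments are IP address, hostname, storage ID, and the xfer/info/IPC ports; the helper class name is hypothetical and not part of this patch:

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;

public class MockDatanodeExample {  // hypothetical helper, not part of this patch
  public static DatanodeInfo newMockDatanode() {
    // Six-argument constructor as used in TestBlockRecovery above; the meaning of
    // each position (ip, hostname, storage ID, xfer/info/ipc ports) is an assumption.
    DatanodeID id = new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0, 0);
    return new DatanodeInfo(id);  // same wrapping as in initRecoveringBlocks()
  }
}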

+ 9 - 9
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java

@@ -162,16 +162,16 @@ public class TestBlockReplacement extends TestCase {
       
       // start to replace the block
       // case 1: proxySource does not contain the block
-      LOG.info("Testcase 1: Proxy " + newNode.getName() 
+      LOG.info("Testcase 1: Proxy " + newNode
            + " does not contain the block " + b);
       assertFalse(replaceBlock(b, source, newNode, proxies.get(0)));
       // case 2: destination already contains the block
-      LOG.info("Testcase 2: Destination " + proxies.get(1).getName() 
+      LOG.info("Testcase 2: Destination " + proxies.get(1)
           + " contains the block " + b);
       assertFalse(replaceBlock(b, source, proxies.get(0), proxies.get(1)));
       // case 3: correct case
-      LOG.info("Testcase 3: Source=" + source.getName() + " Proxy=" + 
-          proxies.get(0).getName() + " Destination=" + newNode.getName() );
+      LOG.info("Testcase 3: Source=" + source + " Proxy=" + 
+          proxies.get(0) + " Destination=" + newNode );
       assertTrue(replaceBlock(b, source, proxies.get(0), newNode));
       // after cluster has time to resolve the over-replication,
       // block locations should contain two proxies and newNode
@@ -181,7 +181,7 @@ public class TestBlockReplacement extends TestCase {
           DEFAULT_BLOCK_SIZE, REPLICATION_FACTOR, client);
       // case 4: proxies.get(0) is not a valid del hint
       // expect either source or newNode replica to be deleted instead
-      LOG.info("Testcase 4: invalid del hint " + proxies.get(0).getName() );
+      LOG.info("Testcase 4: invalid del hint " + proxies.get(0) );
       assertTrue(replaceBlock(b, proxies.get(0), proxies.get(1), source));
       // after cluster has time to resolve the over-replication,
       // block locations should contain two proxies,
@@ -222,7 +222,7 @@ public class TestBlockReplacement extends TestCase {
         for (DatanodeInfo node : includeNodes) {
           if (!nodeLocations.contains(node) ) {
             notDone=true; 
-            LOG.info("Block is not located at " + node.getName() );
+            LOG.info("Block is not located at " + node );
             break;
           }
         }
@@ -231,9 +231,9 @@ public class TestBlockReplacement extends TestCase {
         String expectedNodesList = "";
         String currentNodesList = "";
         for (DatanodeInfo dn : includeNodes) 
-          expectedNodesList += dn.getName() + ", ";
+          expectedNodesList += dn + ", ";
         for (DatanodeInfo dn : nodes) 
-          currentNodesList += dn.getName() + ", ";
+          currentNodesList += dn + ", ";
         LOG.info("Expected replica nodes are: " + expectedNodesList);
         LOG.info("Current actual replica nodes are: " + currentNodesList);
         throw new TimeoutException(
@@ -254,7 +254,7 @@ public class TestBlockReplacement extends TestCase {
       DatanodeInfo sourceProxy, DatanodeInfo destination) throws IOException {
     Socket sock = new Socket();
     sock.connect(NetUtils.createSocketAddr(
-        destination.getName()), HdfsServerConstants.READ_TIMEOUT);
+        destination.getXferAddr()), HdfsServerConstants.READ_TIMEOUT); 
     sock.setKeepAlive(true);
     // sendRequest
     DataOutputStream out = new DataOutputStream(sock.getOutputStream());
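
Note: replaceBlock() now dials the datanode's transfer address instead of the string formerly returned by getName(). A minimal sketch of that connection pattern, relying only on the calls visible in this diff (getXferAddr, NetUtils.createSocketAddr, HdfsServerConstants.READ_TIMEOUT); the class and method names are hypothetical:

import java.io.DataOutputStream;
import java.io.IOException;
import java.net.Socket;

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.net.NetUtils;

public class XferAddrConnectExample {  // hypothetical helper, not part of this patch
  static DataOutputStream connectToDatanode(DatanodeInfo destination) throws IOException {
    Socket sock = new Socket();
    // getXferAddr() provides the host:port string that this call previously took from getName()
    sock.connect(NetUtils.createSocketAddr(destination.getXferAddr()),
        HdfsServerConstants.READ_TIMEOUT);
    sock.setKeepAlive(true);
    return new DataOutputStream(sock.getOutputStream());
  }
}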

Some files were not shown because too many files changed in this diff