
HDFS-1703. Change start/stop scripts and decommission tools for federation. Contributed by Tanping Wang and Erik Steffl.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1080402 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 14 years ago
parent
commit
a126feae97
5 changed files with 100 additions and 22 deletions
  1. CHANGES.txt (+3 -0)
  2. bin/hdfs (+1 -1)
  3. bin/start-dfs.sh (+48 -12)
  4. bin/stop-dfs.sh (+44 -7)
  5. src/java/org/apache/hadoop/hdfs/tools/GetConf.java (+4 -2)

+ 3 - 0
CHANGES.txt

@@ -235,6 +235,9 @@ Trunk (unreleased changes)
 
     HDFS-1746. Federation: TestFileAppend3 fails intermittently. (jitendra)
 
+    HDFS-1703. Improve start/stop scripts and add decommission tool for
+    federation. (Tanping Wang, Erik Steffl via suresh)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)

+ 1 - 1
bin/hdfs

@@ -95,7 +95,7 @@ elif [ "$COMMAND" = "oev" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
 elif [ "$COMMAND" = "fetchdt" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
-elif [ "$COMMAND" = "getconfig" ] ; then
+elif [ "$COMMAND" = "getconf" ] ; then
   CLASS=org.apache.hadoop.hdfs.tools.GetConf
 else
   echo $COMMAND - invalid command
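
With the subcommand renamed from "getconfig" to "getconf", the tool is invoked as below; a minimal sketch, where the hostnames in the output are hypothetical:

    # Query the configured namenodes (output hostnames are illustrative)
    $ bin/hdfs getconf -namenodes
    nn1.example.com nn2.example.com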

+ 48 - 12
bin/start-dfs.sh

@@ -25,17 +25,17 @@ usage="Usage: start-dfs.sh [-upgrade|-rollback]"
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
-. "$bin"/hdfs-config.sh
+. "$bin/hdfs-config.sh"
 
 # get arguments
 if [ $# -ge 1 ]; then
-	nameStartOpt=$1
+	nameStartOpt="$1"
 	shift
-	case $nameStartOpt in
+	case "$nameStartOpt" in
 	  (-upgrade)
 	  	;;
 	  (-rollback) 
-	  	dataStartOpt=$nameStartOpt
+	  	dataStartOpt="$nameStartOpt"
 	  	;;
 	  (*)
 		  echo $usage
@@ -44,14 +44,50 @@ if [ $# -ge 1 ]; then
 	esac
 fi
 
-# start dfs daemons
-# start namenode after datanodes, to minimize time namenode is up w/o data
-# note: datanodes will log connection errors until namenode starts
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start namenode $nameStartOpt
-#
+#---------------------------------------------------------
+# namenodes
+
+NAMENODES=$($HADOOP_HOME/bin/hdfs getconf -namenodes)
+
+echo "Starting namenodes on [$NAMENODES]"
+
+"$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+  --config "$HADOOP_CONF_DIR" \
+  --hostnames "$NAMENODES" \
+  --script "$bin/hdfs" start namenode $nameStartOpt
+
+#---------------------------------------------------------
+# datanodes (using default slaves file)
+
 if [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  echo "Attempting to start secure cluster, skipping datanodes. Run start-secure-dns.sh as root to complete startup."
+  echo \
+    "Attempting to start secure cluster, skipping datanodes. " \
+    "Run start-secure-dns.sh as root to complete startup."
 else
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --script "$bin/hdfs" start datanode $dataStartOpt
 fi
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters --script "$bin"/hdfs start secondarynamenode
+
+#---------------------------------------------------------
+# secondary namenodes (if any)
+
+# if there are no secondary namenodes configured it returns
+# 0.0.0.0 or empty string
+SECONDARY_NAMENODES=$($HADOOP_HOME/bin/hdfs getconf -secondarynamenodes 2>&-)
+SECONDARY_NAMENODES=${SECONDARY_NAMENODES:-'0.0.0.0'}
+
+if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
+  echo \
+    "Secondary namenodes are not configured. " \
+    "Cannot start secondary namenodes."
+else
+  echo "Starting secondary namenodes [$SECONDARY_NAMENODES]"
+
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --hostnames "$SECONDARY_NAMENODES" \
+    --script "$bin/hdfs" start secondarynamenode
+fi
+
+# eof
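
The key change is that the namenode list now comes from the configuration (hdfs getconf -namenodes) and is handed to hadoop-daemons.sh via --hostnames, so every namenode in a federated cluster is started on its own host. A simplified sketch of that fan-out (the real hadoop-daemons.sh delegates to slaves.sh; the ssh loop below is an assumption for illustration):

    # Fan the start command out to each configured namenode host
    NAMENODES=$("$HADOOP_HOME/bin/hdfs" getconf -namenodes)
    for host in $NAMENODES; do
      # run the per-host daemon script on each namenode
      ssh "$host" "$HADOOP_COMMON_HOME/bin/hadoop-daemon.sh" \
        --config "$HADOOP_CONF_DIR" --script hdfs start namenode
    done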

+ 44 - 7
bin/stop-dfs.sh

@@ -15,18 +15,55 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
-# Stop hadoop DFS daemons.  Run this on master node.
-
 bin=`dirname "${BASH_SOURCE-$0}"`
 bin=`cd "$bin"; pwd`
 
 . "$bin"/hdfs-config.sh
 
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop namenode
+#---------------------------------------------------------
+# namenodes
+
+NAMENODES=$($HADOOP_HOME/bin/hdfs getconf -namenodes)
+
+echo "Stopping namenodes on [$NAMENODES]"
+
+"$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+  --config "$HADOOP_CONF_DIR" \
+  --hostnames "$NAMENODES" \
+  --script "$bin/hdfs" stop namenode
+
+#---------------------------------------------------------
+# datanodes (using default slaves file)
+
 if [ -n "$HADOOP_SECURE_DN_USER" ]; then
-  echo "Attempting to stop secure cluster, skipping datanodes. Run stop-secure-dns.sh as root to complete shutdown."
+  echo \
+    "Attempting to stop secure cluster, skipping datanodes. " \
+    "Run stop-secure-dns.sh as root to complete shutdown."
 else
-  "$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --script "$bin/hdfs" stop datanode
 fi
-"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters --script "$bin"/hdfs stop secondarynamenode
+
+#---------------------------------------------------------
+# secondary namenodes (if any)
+
+# if there are no secondary namenodes configured it returns
+# 0.0.0.0 or empty string
+SECONDARY_NAMENODES=$($HADOOP_HOME/bin/hdfs getconf -secondarynamenodes 2>&-)
+SECONDARY_NAMENODES=${SECONDARY_NAMENODES:-'0.0.0.0'}
+
+if [ "$SECONDARY_NAMENODES" = '0.0.0.0' ] ; then
+  echo \
+    "Secondary namenodes are not configured. " \
+    "Cannot stop secondary namenodes."
+else
+  echo "Stopping secondary namenodes [$SECONDARY_NAMENODES]"
+
+  "$HADOOP_COMMON_HOME/bin/hadoop-daemons.sh" \
+    --config "$HADOOP_CONF_DIR" \
+    --hostnames "$SECONDARY_NAMENODES" \
+    --script "$bin/hdfs" stop secondarynamenode
+fi
+
+# eof
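
Both scripts rely on the same fallback idiom: ${VAR:-default} substitutes a default when the variable is unset or empty, which is how an empty getconf result collapses to the 0.0.0.0 sentinel meaning "no secondary namenodes configured". A minimal, self-contained illustration:

    # ${VAR:-default} substitutes without assigning; 0.0.0.0 is
    # the sentinel for "no secondary namenodes configured"
    SECONDARY_NAMENODES=""                               # empty getconf result
    SECONDARY_NAMENODES=${SECONDARY_NAMENODES:-'0.0.0.0'}
    echo "$SECONDARY_NAMENODES"                          # prints: 0.0.0.0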

+ 4 - 2
src/java/org/apache/hadoop/hdfs/tools/GetConf.java

@@ -60,10 +60,12 @@ public class GetConf extends Configured implements Tool {
         "gets list of secondary namenodes in the cluster."),
     BACKUP("-backupNodes", new BackupNodesCommandHandler(),
         "gets list of backup nodes in the cluster."),
-    INCLUDE_FILE("-includeFile", new CommandHandler("dfs.hosts"),
+    INCLUDE_FILE("-includeFile",
+        new CommandHandler(DFSConfigKeys.DFS_HOSTS),
         "gets the include file path that defines the datanodes " +
         "that can join the cluster."),
-    EXCLUDE_FILE("-excludeFile", new CommandHandler("dfs.hosts.exlucde"),
+    EXCLUDE_FILE("-excludeFile",
+        new CommandHandler(DFSConfigKeys.DFS_HOSTS_EXCLUDE),
         "gets the exclude file path that defines the datanodes " +
         "that need to decommissioned.");