Browse source

merge -r 1242976:1242977 from branch-0.23. FIXES: MAPREDUCE-3843

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23.1@1242978 13f79535-47bb-0310-9956-ffa450edef68
Thomas Graves 13 years ago
parent
commit
8689893d1c

+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -738,6 +738,9 @@ Release 0.23.1 - 2012-02-08
     MAPREDUCE-3840.  JobEndNotifier doesn't use the proxyToUse during connecting
     (Ravi Prakash via bobby)
 
+    MAPREDUCE-3843. Job summary log file found missing on the RM host 
+    (Anupam Seth via tgraves)
+
 Release 0.23.0 - 2011-11-01 
 
   INCOMPATIBLE CHANGES

+ 6 - 2
hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh

@@ -20,6 +20,9 @@
 #
 # Environment Variables
 #
+#   HADOOP_LOGFILE Hadoop log file.
+#   HADOOP_ROOT_LOGGER Hadoop root logger.
+#   HADOOP_JHS_LOGGER  Hadoop JobSummary logger.
 #   YARN_CONF_DIR  Alternate conf dir. Default is ${YARN_HOME}/conf.
 #   YARN_LOG_DIR   Where log files are stored.  PWD by default.
 #   YARN_MASTER    host:path where hadoop code should be rsync'd from
@@ -86,8 +89,9 @@ if [ "$YARN_PID_DIR" = "" ]; then
 fi
 
 # some variables
-export YARN_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
-export YARN_ROOT_LOGGER=${YARN_ROOT_LOGGER:-INFO,DRFA}
+export HADOOP_LOGFILE=yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.log
+export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,DRFA}
+export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA}
 log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
 pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
 

+ 22 - 22
hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ClusterSetup.apt.vm

@@ -437,32 +437,32 @@ Hadoop MapReduce Next Generation - Cluster Setup
     Format a new distributed filesystem:
   
 ----
-  $ $HADOOP_PREFIX_HOME/bin/hdfs namenode -format <cluster_name>
+  $ $HADOOP_PREFIX/bin/hdfs namenode -format <cluster_name>
 ----
 
     Start the HDFS with the following command, run on the designated NameNode:
   
 ----
-  $ $HADOOP_PREFIX_HOME/bin/hdfs start namenode --config $HADOOP_CONF_DIR  
+  $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
 ----    	  
 
     Run a script to start DataNodes on all slaves:
 
 ----
-  $ $HADOOP_PREFIX_HOME/bin/hdfs start datanode --config $HADOOP_CONF_DIR  
+  $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
 ----    	  
   
     Start the YARN with the following command, run on the designated 
     ResourceManager:
   
 ----
-  $ $YARN_HOME/bin/yarn start resourcemanager --config $HADOOP_CONF_DIR  
+  $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager 
 ----    	  
 
     Run a script to start NodeManagers on all slaves:
 
 ----
-  $ $YARN_HOME/bin/yarn start nodemanager --config $HADOOP_CONF_DIR  
+  $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager 
 ----    	  
 
     Start a standalone WebAppProxy server.  If multiple servers
@@ -476,7 +476,7 @@ Hadoop MapReduce Next Generation - Cluster Setup
     designated server:
   
 ----
-  $ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR  
+  $ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh start historyserver --config $HADOOP_CONF_DIR  
 ----    	  
 
     * Hadoop Shutdown      
@@ -485,26 +485,26 @@ Hadoop MapReduce Next Generation - Cluster Setup
     NameNode:
   
 ----
-  $ $HADOOP_PREFIX_HOME/bin/hdfs stop namenode --config $HADOOP_CONF_DIR  
+  $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode
 ----    	  
 
     Run a script to stop DataNodes on all slaves:
 
 ----
-  $ $HADOOP_PREFIX_HOME/bin/hdfs stop datanode --config $HADOOP_CONF_DIR  
+  $ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode
 ----    	  
   
     Stop the ResourceManager with the following command, run on the designated 
     ResourceManager:
   
 ----
-  $ $YARN_HOME/bin/yarn stop resourcemanager --config $HADOOP_CONF_DIR  
+  $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager 
 ----    	  
 
     Run a script to stop NodeManagers on all slaves:
 
 ----
-  $ $YARN_HOME/bin/yarn stop nodemanager --config $HADOOP_CONF_DIR  
+  $ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager 
 ----    	  
 
     Stop the WebAppProxy server. If multiple servers are used with load
@@ -519,7 +519,7 @@ Hadoop MapReduce Next Generation - Cluster Setup
     designated server:
   
 ----
-  $ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR  
+  $ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh stop historyserver --config $HADOOP_CONF_DIR  
 ----    	  
 
     
@@ -978,34 +978,34 @@ KVNO Timestamp         Principal
     Format a new distributed filesystem as <hdfs>:
   
 ----
-[hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs namenode -format <cluster_name>
+[hdfs]$ $HADOOP_PREFIX/bin/hdfs namenode -format <cluster_name>
 ----
 
     Start the HDFS with the following command, run on the designated NameNode
     as <hdfs>:
   
 ----
-[hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs start namenode --config $HADOOP_CONF_DIR  
+[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start namenode
 ----    	  
 
     Run a script to start DataNodes on all slaves as <root> with a special
     environment variable <<<HADOOP_SECURE_DN_USER>>> set to <hdfs>:
 
 ----
-[root]$ HADOOP_SECURE_DN_USER=hdfs $HADOOP_PREFIX_HOME/bin/hdfs start datanode --config $HADOOP_CONF_DIR  
+[root]$ HADOOP_SECURE_DN_USER=hdfs $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs start datanode
 ----    	  
   
     Start the YARN with the following command, run on the designated 
     ResourceManager as <yarn>:
   
 ----
-[yarn]$ $YARN_HOME/bin/yarn start resourcemanager --config $HADOOP_CONF_DIR  
+[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start resourcemanager 
 ----    	  
 
     Run a script to start NodeManagers on all slaves as <yarn>:
 
 ----
-[yarn]$ $YARN_HOME/bin/yarn start nodemanager --config $HADOOP_CONF_DIR  
+[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemanager 
 ----    	  
 
     Start a standalone WebAppProxy server. Run on the WebAppProxy 
@@ -1020,7 +1020,7 @@ KVNO Timestamp         Principal
     designated server as <mapred>:
   
 ----
-[mapred]$ $YARN_HOME/bin/mapred start historyserver --config $YARN_CONF_DIR  
+[mapred]$ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh start historyserver --config $HADOOP_CONF_DIR  
 ----    	  
 
     * Hadoop Shutdown      
@@ -1029,26 +1029,26 @@ KVNO Timestamp         Principal
     as <hdfs>:
   
 ----
-[hdfs]$ $HADOOP_PREFIX_HOME/bin/hdfs stop namenode --config $HADOOP_CONF_DIR  
+[hdfs]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop namenode
 ----    	  
 
     Run a script to stop DataNodes on all slaves as <root>:
 
 ----
-[root]$ $HADOOP_PREFIX_HOME/bin/hdfs stop datanode --config $HADOOP_CONF_DIR  
+[root]$ $HADOOP_PREFIX/sbin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script hdfs stop datanode
 ----    	  
   
     Stop the ResourceManager with the following command, run on the designated 
     ResourceManager as <yarn>:
   
 ----
-[yarn]$ $YARN_HOME/bin/yarn stop resourcemanager --config $HADOOP_CONF_DIR  
+[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop resourcemanager 
 ----    	  
 
     Run a script to stop NodeManagers on all slaves as <yarn>:
 
 ----
-[yarn]$ $YARN_HOME/bin/yarn stop nodemanager --config $HADOOP_CONF_DIR  
+[yarn]$ $YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanager 
 ----    	  
 
     Stop the WebAppProxy server. Run on the WebAppProxy  server as
@@ -1063,7 +1063,7 @@ KVNO Timestamp         Principal
     designated server as <mapred>:
 
 ----
-[mapred]$ $YARN_HOME/bin/mapred stop historyserver --config $YARN_CONF_DIR  
+[mapred]$ $HADOOP_PREFIX/sbin/mr-jobhistory-daemon.sh stop historyserver --config $HADOOP_CONF_DIR  
 ----    	  
     
 * {Web Interfaces}