HADOOP-6794. Move configuration and script files post split. Includes HDFS-1181, MAPREDUCE-1033.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@953490 13f79535-47bb-0310-9956-ffa450edef68
Thomas White 15 years ago
parent
commit
60d8412ae3
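
With this change the HDFS scripts live in the project's own bin directory instead of Hadoop Common's. A minimal sketch of a post-split session, assuming hypothetical install paths and that a working HDFS configuration is already in place:

  # hypothetical layout after unpacking the two distributions
  export HADOOP_COMMON_HOME=/opt/hadoop-common
  export HADOOP_HDFS_HOME=/opt/hadoop-hdfs

  # format the filesystem, then bring up the HDFS daemons
  "$HADOOP_HDFS_HOME"/bin/hdfs namenode -format
  "$HADOOP_HDFS_HOME"/bin/start-dfs.sh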

+ 2 - 0
CHANGES.txt

@@ -590,6 +590,8 @@ Release 0.21.0 - Unreleased
     HDFS-1161.  Make DN minimum valid volumes configurable.
     (Eli Collins via tomwhite)
 
+    HDFS-1181. Move configuration and script files post split. (tomwhite)
+
   OPTIMIZATIONS
 
     HDFS-946.  NameNode should not return full path name when listing a

+ 113 - 0
bin/hdfs

@@ -0,0 +1,113 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfs-config.sh
+
+function print_usage(){
+  echo "Usage: hdfs [--config confdir] COMMAND"
+  echo "       where COMMAND is one of:"
+  echo "  namenode -format     format the DFS filesystem"
+  echo "  secondarynamenode    run the DFS secondary namenode"
+  echo "  namenode             run the DFS namenode"
+  echo "  datanode             run a DFS datanode"
+  echo "  dfsadmin             run a DFS admin client"
+  echo "  fsck                 run a DFS filesystem checking utility"
+  echo "  balancer             run a cluster balancing utility"
+  echo "  jmxget               get JMX exported values from NameNode or DataNode."
+  echo "  oiv                  apply the offline fsimage viewer to an fsimage"
+  echo "  fetchdt              fetch a delegation token from the NameNode"
+  echo "                       Use -help to see options"
+  echo ""
+  echo "Most commands print help when invoked w/o parameters."
+}
+
+if [ $# = 0 ]; then
+  print_usage
+  exit
+fi
+
+COMMAND=$1
+shift
+
+if [ "$COMMAND" = "namenode" ] ; then
+  CLASS='org.apache.hadoop.hdfs.server.namenode.NameNode'
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_NAMENODE_OPTS"
+elif [ "$COMMAND" = "secondarynamenode" ] ; then
+  CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
+elif [ "$COMMAND" = "datanode" ] ; then
+  CLASS='org.apache.hadoop.hdfs.server.datanode.DataNode'
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_DATANODE_OPTS"
+elif [ "$COMMAND" = "dfs" ] ; then
+  CLASS=org.apache.hadoop.fs.FsShell
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "dfsadmin" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.DFSAdmin
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "fsck" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.DFSck
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "balancer" ] ; then
+  CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
+  HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
+elif [ "$COMMAND" = "jmxget" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.JMXGet
+elif [ "$COMMAND" = "oiv" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
+elif [ "$COMMAND" = "fetchdt" ] ; then
+  CLASS=org.apache.hadoop.hdfs.tools.DelegationTokenFetcher
+else
+  echo "$COMMAND - invalid command"
+  print_usage
+  exit 1
+fi
+
+# for developers, add hdfs classes to CLASSPATH
+if [ -d "$HADOOP_HDFS_HOME/build/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/classes
+fi
+if [ -d "$HADOOP_HDFS_HOME/build/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build
+fi
+if [ -d "$HADOOP_HDFS_HOME/build/test/classes" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/test/classes
+fi
+if [ -d "$HADOOP_HDFS_HOME/build/tools" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME/build/tools
+fi
+
+# for releases, add core hdfs jar & webapps to CLASSPATH
+if [ -d "$HADOOP_HDFS_HOME/webapps" ]; then
+  CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME
+fi
+for f in $HADOOP_HDFS_HOME/hadoop-hdfs-*.jar; do
+  CLASSPATH=${CLASSPATH}:$f;
+done
+
+# add libs to CLASSPATH
+for f in $HADOOP_HDFS_HOME/lib/*.jar; do
+  CLASSPATH=${CLASSPATH}:$f;
+done
+
+if $cygwin; then
+  CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+fi
+export CLASSPATH=$CLASSPATH
+exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"

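The launcher above is a thin dispatcher: it maps COMMAND to a Java main class, appends the matching per-command options to HADOOP_OPTS, assembles the classpath, and execs a single JVM. A hedged example of driving it (the heap value is illustrative):

  # client-side check of the whole namespace
  bin/hdfs fsck /

  # daemon-specific JVM flags flow in through the per-command hooks
  HADOOP_NAMENODE_OPTS="-Xmx2g" bin/hdfs namenode
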
+ 33 - 0
bin/hdfs-config.sh

@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Included in all the hdfs scripts via the source command;
+# this file should not be executed directly.
+
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+export HADOOP_HDFS_HOME="${HADOOP_HDFS_HOME:-$bin/..}"
+
+if [ -d "${HADOOP_COMMON_HOME}" ]; then
+  . "$HADOOP_COMMON_HOME"/bin/hadoop-config.sh
+elif [ -d "${HADOOP_HOME}" ]; then
+  . "$HADOOP_HOME"/bin/hadoop-config.sh
+else
+  echo "Hadoop common not found."
+  exit 1
+fi

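The resolution order matters: HADOOP_HDFS_HOME defaults to the script's parent directory, and the shared hadoop-config.sh is sourced from HADOOP_COMMON_HOME first, falling back to HADOOP_HOME. A sketch with placeholder paths:

  # preferred: point at a standalone common distribution
  HADOOP_COMMON_HOME=/opt/hadoop-common bin/hdfs dfs -ls /

  # fallback: a combined HADOOP_HOME install is also accepted
  HADOOP_HOME=/opt/hadoop bin/hdfs dfs -ls /
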
+ 25 - 0
bin/start-balancer.sh

@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfs-config.sh
+
+# Start balancer daemon.
+
+"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start balancer $@

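Since the trailing $@ is forwarded, balancer options pass straight through to the daemon; for instance, with the balancer's -threshold option (the value is illustrative):

  # keep every datanode within 5 percentage points of average utilization
  bin/start-balancer.sh -threshold 5
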
+ 52 - 0
bin/start-dfs.sh

@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Start hadoop dfs daemons.
+# Optionally upgrade or rollback dfs state.
+# Run this on the master node.
+
+usage="Usage: start-dfs.sh [-upgrade|-rollback]"
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfs-config.sh
+
+# get arguments
+if [ $# -ge 1 ]; then
+	nameStartOpt=$1
+	shift
+	case $nameStartOpt in
+	  (-upgrade)
+	  	;;
+	  (-rollback) 
+	  	dataStartOpt=$nameStartOpt
+	  	;;
+	  (*)
+		  echo $usage
+		  exit 1
+	    ;;
+	esac
+fi
+
+# start dfs daemons
+# the namenode is started first, then the datanodes, then the secondary namenode
+# note: datanodes will log connection errors until the namenode is up
+"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start namenode $nameStartOpt
+"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs start datanode $dataStartOpt
+"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters --script "$bin"/hdfs start secondarynamenode

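Per the case statement, -upgrade is consumed by the namenode alone, while -rollback is also forwarded to the datanodes through dataStartOpt. For example:

  bin/start-dfs.sh             # normal startup
  bin/start-dfs.sh -upgrade    # namenode migrates the on-disk layout
  bin/start-dfs.sh -rollback   # namenode and datanodes revert to the previous layout
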
+ 26 - 0
bin/stop-balancer.sh

@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfs-config.sh
+
+# Stop balancer daemon.
+# Run this on the machine where the balancer is running
+
+"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop balancer

+ 28 - 0
bin/stop-dfs.sh

@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+# Stop hadoop DFS daemons.  Run this on the master node.
+
+bin=`dirname "${BASH_SOURCE-$0}"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hdfs-config.sh
+
+"$HADOOP_COMMON_HOME"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop namenode
+"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/hdfs stop datanode
+"$HADOOP_COMMON_HOME"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --hosts masters --script "$bin"/hdfs stop secondarynamenode

+ 15 - 0
build.xml

@@ -920,6 +920,10 @@
     <copy todir="${dist.dir}"> 
       <fileset file="${build.dir}/${name}-*.jar"/>
     </copy>
+  	
+    <copy todir="${dist.dir}/bin">
+      <fileset dir="bin"/>
+    </copy>
 
     <copy todir="${dist.dir}/conf">
       <fileset dir="${conf.dir}" excludes="**/*.template"/>
@@ -948,6 +952,7 @@
     <copy todir="${dist.dir}/" file="build.xml"/>
 
     <chmod perm="ugo+x" type="file" parallel="false">
+        <fileset dir="${dist.dir}/bin"/>
         <fileset dir="${dist.dir}/src/contrib/">
           <include name="*/bin/*" />
         </fileset>
@@ -962,10 +967,12 @@
     <macro_tar param.destfile="${build.dir}/${final.name}.tar.gz">
       <param.listofitems>
         <tarfileset dir="${build.dir}" mode="664">
+          <exclude name="${final.name}/bin/*" />
           <exclude name="${final.name}/contrib/*/bin/*" />
           <include name="${final.name}/**" />
         </tarfileset>
         <tarfileset dir="${build.dir}" mode="755">
+          <include name="${final.name}/bin/*" />
           <include name="${final.name}/contrib/*/bin/*" />
         </tarfileset>
       </param.listofitems>
@@ -977,6 +984,7 @@
     <mkdir dir="${dist.dir}"/>
     <mkdir dir="${dist.dir}/lib"/>
     <mkdir dir="${dist.dir}/contrib"/>
+    <mkdir dir="${dist.dir}/bin"/>
 
     <copy todir="${dist.dir}/lib" includeEmptyDirs="false" flatten="true">
       <fileset dir="${common.ivy.lib.dir}"/>
@@ -1003,6 +1011,10 @@
     <copy todir="${dist.dir}"> 
       <fileset file="${build.dir}/${name}-*.jar"/>
     </copy>
+  	
+    <copy todir="${dist.dir}/bin">
+      <fileset dir="bin"/>
+    </copy>
     
     <copy todir="${dist.dir}/conf">
       <fileset dir="${conf.dir}" excludes="**/*.template"/>
@@ -1022,6 +1034,9 @@
   	
     <copy todir="${dist.dir}/" file="build.xml"/>
 
+    <chmod perm="ugo+x" type="file" parallel="false">
+        <fileset dir="${dist.dir}/bin"/>
+    </chmod>
   </target>
 
   <target name="binary-system" depends="bin-package, jar-system, jar-test-system"

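The build changes copy bin into the distribution, chmod the scripts executable, and store them at mode 755 in the release tarball while everything else stays at 664. Assuming the project's usual Ant entry point for release tarballs (target name hypothetical), verifying the result might look like:

  ant tar                                            # hypothetical target; builds ${final.name}.tar.gz
  tar tzvf build/hadoop-hdfs-*.tar.gz | grep 'bin/'  # scripts should list as -rwxr-xr-x
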
+ 0 - 8
conf/core-site.xml.template

@@ -1,8 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-</configuration>

+ 0 - 54
conf/hadoop-env.sh.template

@@ -1,54 +0,0 @@
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
-
-# Extra Java CLASSPATH elements.  Optional.
-# export HADOOP_CLASSPATH=
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HADOOP_HEAPSIZE=2000
-
-# Extra Java runtime options.  Empty by default.
-# export HADOOP_OPTS=-server
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
-export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
-export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
-# export HADOOP_TASKTRACKER_OPTS=
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-# export HADOOP_CLIENT_OPTS
-
-# Extra ssh options.  Empty by default.
-# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-# export HADOOP_PID_DIR=/var/hadoop/pids
-
-# A string representing this instance of hadoop. $USER by default.
-# export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-# export HADOOP_NICENESS=10

+ 0 - 53
conf/hadoop-metrics.properties

@@ -1,53 +0,0 @@
-# Configuration of the "dfs" context for null
-dfs.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "dfs" context for file
-#dfs.class=org.apache.hadoop.metrics.file.FileContext
-#dfs.period=10
-#dfs.fileName=/tmp/dfsmetrics.log
-
-# Configuration of the "dfs" context for ganglia
-# dfs.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# dfs.period=10
-# dfs.servers=localhost:8649
-
-
-# Configuration of the "mapred" context for null
-mapred.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "mapred" context for file
-#mapred.class=org.apache.hadoop.metrics.file.FileContext
-#mapred.period=10
-#mapred.fileName=/tmp/mrmetrics.log
-
-# Configuration of the "mapred" context for ganglia
-# mapred.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# mapred.period=10
-# mapred.servers=localhost:8649
-
-
-# Configuration of the "jvm" context for null
-jvm.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "jvm" context for file
-#jvm.class=org.apache.hadoop.metrics.file.FileContext
-#jvm.period=10
-#jvm.fileName=/tmp/jvmmetrics.log
-
-# Configuration of the "jvm" context for ganglia
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# jvm.period=10
-# jvm.servers=localhost:8649
-
-# Configuration of the "rpc" context for null
-rpc.class=org.apache.hadoop.metrics.spi.NullContext
-
-# Configuration of the "rpc" context for file
-#rpc.class=org.apache.hadoop.metrics.file.FileContext
-#rpc.period=10
-#rpc.fileName=/tmp/rpcmetrics.log
-
-# Configuration of the "rpc" context for ganglia
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-# rpc.period=10
-# rpc.servers=localhost:8649

+ 0 - 97
conf/hadoop-policy.xml.template

@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code 
-    via the DistributedFileSystem. 
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol 
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to 
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to 
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to 
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce 
-    tasks to communicate with the parent tasktracker. 
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the 
-    dfsadmin and mradmin commands to refresh the security policy in-effect. 
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-</configuration>

+ 0 - 94
conf/log4j.properties

@@ -1,94 +0,0 @@
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-#
-# FSNamesystem Audit logging
-# All audit events are logged at INFO level
-#
-log4j.logger.org.apache.hadoop.fs.FSNamesystem.audit=WARN
-
-# Custom Logging levels
-
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter

+ 0 - 1
conf/masters.template

@@ -1 +0,0 @@
-localhost

+ 0 - 1
conf/slaves.template

@@ -1 +0,0 @@
-localhost

+ 0 - 57
conf/ssl-client.xml.example

@@ -1,57 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-<property>
-  <name>ssl.client.truststore.location</name>
-  <value></value>
-  <description>Truststore to be used by clients like distcp. Must be
-  specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.truststore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.truststore.type</name>
-  <value>jks</value>
-  <description>Optional. Default value is "jks".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.location</name>
-  <value></value>
-  <description>Keystore to be used by clients like distcp. Must be
-  specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.keypassword</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.client.keystore.type</name>
-  <value>jks</value>
-  <description>Optional. Default value is "jks".
-  </description>
-</property>
-
-</configuration>

+ 0 - 55
conf/ssl-server.xml.example

@@ -1,55 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<configuration>
-
-<property>
-  <name>ssl.server.truststore.location</name>
-  <value></value>
-  <description>Truststore to be used by NN and DN. Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.truststore.password</name>
-  <value></value>
-  <description>Optional. Default value is "".
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.truststore.type</name>
-  <value>jks</value>
-  <description>Optional. Default value is "jks".
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.location</name>
-  <value></value>
-  <description>Keystore to be used by NN and DN. Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.password</name>
-  <value></value>
-  <description>Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.keypassword</name>
-  <value></value>
-  <description>Must be specified.
-  </description>
-</property>
-
-<property>
-  <name>ssl.server.keystore.type</name>
-  <value>jks</value>
-  <description>Optional. Default value is "jks".
-  </description>
-</property>
-
-</configuration>

+ 0 - 3
conf/taskcontroller.cfg

@@ -1,3 +0,0 @@
-mapred.local.dir=#configured value of hadoop.tmp.dir it can be a list of paths comma separated
-hadoop.pid.dir=#configured HADOOP_PID_DIR
-hadoop.indent.str=#configured HADOOP_IDENT_STR