
commit d52b82f73196974144eb6a1de8923ca6d7db9287
Author: Chris Douglas <cdouglas@apache.org>
Date: Thu Sep 16 23:06:32 2010 -0700

Rewrite task initialization to avoid race conditions leading to privilege
escalation and resource leakage by performing more actions as the user.
Contributed by Owen O'Malley, Devaraj Das, and Chris Douglas.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security-patches@1077679 13f79535-47bb-0310-9956-ffa450edef68

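The approach the message describes, performing more actions as the user, is the classic setuid-helper pattern: rather than the TaskTracker daemon preparing task directories and files with root privileges and handing them over afterwards (leaving a privileged window that can be raced), the task-controller binary switches to the submitting user first and performs the setup with that user's own credentials. A minimal illustrative sketch of the privilege-drop step, using a hypothetical run_as_user helper rather than the actual entry points in impl/main.c and impl/task-controller.c:

    #include <grp.h>
    #include <pwd.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Hypothetical sketch: become `user` permanently, then do all task
     * setup work as that user.  Order matters: supplementary groups and
     * the gid must be changed while the process still has root, and
     * setuid() comes last. */
    static int run_as_user(const char *user, char *const argv[]) {
      struct passwd *pw = getpwnam(user);
      if (pw == NULL) {
        fprintf(stderr, "no such user: %s\n", user);
        return 1;
      }
      if (initgroups(user, pw->pw_gid) != 0 ||
          setgid(pw->pw_gid) != 0 ||
          setuid(pw->pw_uid) != 0) {
        perror("failed to drop privileges");
        return 1;
      }
      /* From here on, every mkdir/open/exec runs with the user's own
       * permissions, so nothing root-owned is left to race against. */
      execvp(argv[0], argv);
      perror("exec failed");
      return 1;
    }

    int main(int argc, char **argv) {
      if (argc < 3) {
        fprintf(stderr, "usage: %s <user> <cmd> [args...]\n", argv[0]);
        return 1;
      }
      return run_as_user(argv[1], &argv[2]);
    }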
Committer: Owen O'Malley
Parent: f84cb82518
61 changed files with 5006 additions and 9746 deletions
  1. +1 -0  .gitignore
  2. +28 -55  build.xml
  3. +42 -0  src/c++/task-controller/.autom4te.cfg
  4. +13 -0  src/c++/task-controller/.gitignore
  5. +32 -0  src/c++/task-controller/Makefile.am
  6. +0 -54  src/c++/task-controller/Makefile.in
  7. +0 -5237  src/c++/task-controller/configure
  8. +21 -32  src/c++/task-controller/configure.ac
  9. +106 -49  src/c++/task-controller/impl/configuration.c
  10. +16 -36  src/c++/task-controller/impl/configuration.h
  11. +191 -0  src/c++/task-controller/impl/main.c
  12. +1044 -0  src/c++/task-controller/impl/task-controller.c
  13. +149 -0  src/c++/task-controller/impl/task-controller.h
  14. +0 -242  src/c++/task-controller/main.c
  15. +0 -1270  src/c++/task-controller/task-controller.c
  16. +0 -141  src/c++/task-controller/task-controller.h
  17. +763 -0  src/c++/task-controller/test/test-task-controller.c
  18. +0 -243  src/c++/task-controller/tests/test-task-controller.c
  19. +89 -12  src/core/org/apache/hadoop/fs/LocalDirAllocator.java
  20. +33 -128  src/core/org/apache/hadoop/util/ProcessTree.java
  21. +2 -169  src/core/org/apache/hadoop/util/ProcfsBasedProcessTree.java
  22. +65 -214  src/mapred/org/apache/hadoop/filecache/DistributedCache.java
  23. +58 -30  src/mapred/org/apache/hadoop/filecache/TaskDistributedCacheManager.java
  24. +169 -122  src/mapred/org/apache/hadoop/filecache/TrackerDistributedCacheManager.java
  25. +43 -6  src/mapred/org/apache/hadoop/mapred/Child.java
  26. +11 -27  src/mapred/org/apache/hadoop/mapred/CleanupQueue.java
  27. +166 -122  src/mapred/org/apache/hadoop/mapred/DefaultTaskController.java
  28. +17 -1  src/mapred/org/apache/hadoop/mapred/IsolationRunner.java
  29. +1 -2  src/mapred/org/apache/hadoop/mapred/JobInProgress.java
  30. +531 -0  src/mapred/org/apache/hadoop/mapred/JobLocalizer.java
  31. +110 -79  src/mapred/org/apache/hadoop/mapred/JvmManager.java
  32. +170 -494  src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java
  33. +18 -10  src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java
  34. +4 -2  src/mapred/org/apache/hadoop/mapred/MapTask.java
  35. +4 -2  src/mapred/org/apache/hadoop/mapred/MapTaskRunner.java
  36. +4 -3  src/mapred/org/apache/hadoop/mapred/ReduceTask.java
  37. +3 -2  src/mapred/org/apache/hadoop/mapred/ReduceTaskRunner.java
  38. +3 -1  src/mapred/org/apache/hadoop/mapred/Task.java
  39. +120 -322  src/mapred/org/apache/hadoop/mapred/TaskController.java
  40. +33 -9  src/mapred/org/apache/hadoop/mapred/TaskLog.java
  41. +3 -12  src/mapred/org/apache/hadoop/mapred/TaskMemoryManagerThread.java
  42. +86 -87  src/mapred/org/apache/hadoop/mapred/TaskRunner.java
  43. +275 -372  src/mapred/org/apache/hadoop/mapred/TaskTracker.java
  44. +9 -0  src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java
  45. +23 -22  src/mapred/org/apache/hadoop/mapred/UserLogCleaner.java
  46. +1 -1  src/mapred/org/apache/hadoop/mapreduce/security/TokenCache.java
  47. +4 -16  src/mapred/org/apache/hadoop/mapreduce/server/tasktracker/Localizer.java
  48. +40 -5  src/mapred/org/apache/hadoop/mapreduce/server/tasktracker/userlogs/UserLogManager.java
  49. +103 -61  src/test/org/apache/hadoop/filecache/TestTrackerDistributedCacheManager.java
  50. +3 -3  src/test/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java
  51. +5 -3  src/test/org/apache/hadoop/mapred/TestJobTrackerSafeMode.java
  52. +46 -9  src/test/org/apache/hadoop/mapred/TestJvmManager.java
  53. +3 -7  src/test/org/apache/hadoop/mapred/TestLinuxTaskController.java
  54. +0 -1  src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java
  55. +288 -0  src/test/org/apache/hadoop/mapred/TestTaskCommit.java
  56. +13 -4  src/test/org/apache/hadoop/mapred/TestTaskEnvironment.java
  57. +15 -19  src/test/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java
  58. +2 -1  src/test/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java
  59. +6 -3  src/test/org/apache/hadoop/mapred/TestUserLogCleanup.java
  60. +14 -1  src/test/org/apache/hadoop/mapred/UtilsForTests.java
  61. +7 -3  src/test/org/apache/hadoop/util/TestProcfsBasedProcessTree.java

+ 1 - 0
.gitignore

@@ -52,3 +52,4 @@ src/docs/build
 src/docs/cn/build
 src/docs/cn/src/documentation/sitemap.xmap
 src/docs/cn/uming.conf
+src/contrib/hdfsproxy/src/test/resources

+ 28 - 55
build.xml

@@ -177,9 +177,10 @@
     stored for compilation -->
   <property name="build.c++.task-controller" 
     value="${build.c++}/task-controller" />
-  <!-- the default install dir is build directory override it using
-   -Dtask-controller.install.dir=$HADOOP_HOME/bin -->
-  <property name="task-controller.install.dir" value="${dist.dir}/bin" />
+  <property name="task-controller.prefix.dir" value="${dist.dir}" />
+  <!-- the configuration directory for the linux task controller -->
+  <property name="hadoop.conf.dir" value="/etc/hadoop"/>
+
   <!-- end of task-controller properties -->
 
   <!-- jsvc properties set here -->
@@ -2272,62 +2273,34 @@
   </target>
 
   <!-- taskcontroller targets -->
-  <target name="init-task-controller-build">
-    <mkdir dir="${build.c++.task-controller}" />
-    <copy todir="${build.c++.task-controller}">
-      <fileset dir="${c++.task-controller.src}" includes="*.c"/>
-      <fileset dir="${c++.task-controller.src}" includes="*.h"/>
-    </copy>
-    <chmod file="${c++.task-controller.src}/configure" perm="ugo+x"/> 
-    <condition property="task-controller.conf.dir.passed">
-      <not>
-        <equals arg1="${hadoop.conf.dir}" arg2="$${hadoop.conf.dir}"/>
-      </not>
-    </condition>
-  </target>
-  <target name="configure-task-controller" depends="init,
-          init-task-controller-build,
-          task-controller-configuration-with-confdir,
-          task-controller-configuration-with-no-confdir">
-  </target>
-  <target name="task-controller-configuration-with-confdir" 
-          if="task-controller.conf.dir.passed" >
-    <exec executable="${c++.task-controller.src}/configure" 
-          dir="${build.c++.task-controller}" failonerror="yes">
-      <arg value="--prefix=${task-controller.install.dir}" />
-      <arg value="--with-confdir=${hadoop.conf.dir}" />
+  <target name="task-controller" depends="init">
+    <exec executable="autoreconf" 
+          dir="${c++.task-controller.src}"
+          searchpath="yes" failonerror="yes">
+      <arg value="-i"/>
     </exec>
-  </target>
-  <target name="task-controller-configuration-with-no-confdir" 
-          unless="task-controller.conf.dir.passed">
-    <exec executable="${c++.task-controller.src}/configure" 
-          dir="${build.c++.task-controller}" failonerror="yes">
-      <arg value="--prefix=${task-controller.install.dir}" />
+    <mkdir dir="${build.c++.task-controller}" />
+    <exec executable="${c++.task-controller.src}/configure"
+          dir="${build.c++.task-controller}">
+      <arg value="--prefix=${task-controller.prefix.dir}"/>
+      <env key="CFLAGS" 
+           value="-DHADOOP_CONF_DIR=${hadoop.conf.dir}"/>
     </exec>
-  </target>
-  <!--
-    * Create the installation directory.
-    * Do a make install.
-   -->
-  <target name="task-controller" depends="configure-task-controller">
-    <mkdir dir="${task-controller.install.dir}" />
-    <exec executable="${make.cmd}" dir="${build.c++.task-controller}" 
-        searchpath="yes" failonerror="yes">
-      <arg value="install" />
+    <!-- delete main in case HADOOP_CONF_DIR is different -->
+    <delete file="${build.c++.task-controller}/impl/main.o"
+            quiet="true" failonerror="false"/>
+    <exec executable="make"
+          dir="${build.c++.task-controller}"
+          searchpath="yes" failonerror="yes">
+      <arg value="install"/>
     </exec>
   </target>
-  <target name="test-task-controller" depends="task-controller">
-    <copy todir="${build.c++.task-controller}" verbose="true">
-      <fileset dir="${c++.task-controller.src}" includes="tests/"/>
-    </copy>
-    <exec executable="${make.cmd}" dir="${build.c++.task-controller}" 
-        searchpath="yes" failonerror="yes">
-      <arg value="clean" />
-      <arg value="test" />
-    </exec>
-    <exec executable="${build.c++.task-controller}/tests/test-task-controller"
-        dir="${build.c++.task-controller}/tests/"
-        failonerror="yes">
+
+  <target name="test-task-controller" depends="init,task-controller">
+    <exec executable="make"
+          dir="${build.c++.task-controller}"
+          searchpath="yes" failonerror="yes">
+      <arg value="check"/>
     </exec>
   </target>
 

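Two details of the rewritten target are easy to miss. First, configure is regenerated on every build by autoreconf -i instead of being checked in. Second, the Hadoop configuration directory is no longer a --with-confdir configure option but a compile-time define passed through CFLAGS; because that value is baked into the object file, impl/main.o is deleted up front so a changed hadoop.conf.dir actually forces a recompile. On the C side, -DHADOOP_CONF_DIR=/etc/hadoop defines a bare token sequence, not a string literal, so consuming code needs the standard two-level stringification idiom. A sketch with hypothetical macro names (the real impl/ sources may spell this differently):

    #include <stdio.h>

    /* Two steps are required: stringizing HADOOP_CONF_DIR directly
     * would yield the text "HADOOP_CONF_DIR"; the extra level of
     * expansion substitutes the macro's value first. */
    #define STR_HELPER(x) #x
    #define STR(x) STR_HELPER(x)

    #ifndef HADOOP_CONF_DIR
    #define HADOOP_CONF_DIR /etc/hadoop   /* assumed fallback for the sketch */
    #endif

    int main(void) {
      /* Built as: cc -DHADOOP_CONF_DIR=/etc/hadoop conf.c */
      printf("conf dir: %s\n", STR(HADOOP_CONF_DIR));
      return 0;
    }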
+ 42 - 0
src/c++/task-controller/.autom4te.cfg

@@ -0,0 +1,42 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#
+# autom4te configuration for hadoop utils library
+#
+
+begin-language: "Autoheader-preselections"
+args: --no-cache 
+end-language: "Autoheader-preselections"
+
+begin-language: "Automake-preselections"
+args: --no-cache 
+end-language: "Automake-preselections"
+
+begin-language: "Autoreconf-preselections"
+args: --no-cache 
+end-language: "Autoreconf-preselections"
+
+begin-language: "Autoconf-without-aclocal-m4"
+args: --no-cache 
+end-language: "Autoconf-without-aclocal-m4"
+
+begin-language: "Autoconf"
+args: --no-cache 
+end-language: "Autoconf"
+

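The --no-cache preselections cover every tool in the autoreconf chain (autoconf, autoheader, automake), so regenerating the build system, as the task-controller ant target above now does, leaves no autom4te.cache directory behind in the source tree.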
+ 13 - 0
src/c++/task-controller/.gitignore

@@ -0,0 +1,13 @@
+Makefile
+install-sh
+aclocal.m4
+compile
+config.guess
+config.sub
+configure
+depcomp
+install-sh
+ltmain.sh
+Makefile.in
+missing
+stamp-h1

+ 32 - 0
src/c++/task-controller/Makefile.am

@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+AM_CFLAGS=-I$(srcdir)/impl -Wall -g -Werror
+
+# Define the programs that need to be built
+bin_PROGRAMS = task-controller
+check_PROGRAMS = test-task-controller
+
+TESTS = test-task-controller
+
+# Define the sources for the common files
+common_SOURCES = impl/configuration.c impl/task-controller.c
+
+# Define the sources for the real executable
+task_controller_SOURCES = $(common_SOURCES) impl/main.c
+
+# Define the sources for the test executable
+test_task_controller_SOURCES = $(common_SOURCES) test/test-task-controller.c

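Under automake, listing test-task-controller in both check_PROGRAMS and TESTS means that make check, which the new test-task-controller ant target invokes, builds the test binary from the shared common_SOURCES plus its own driver and then runs it, failing the target on a non-zero exit status.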
+ 0 - 54
src/c++/task-controller/Makefile.in

@@ -1,54 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-OBJS=main.o task-controller.o configuration.o
-CC=@CC@
-CFLAGS = @CFLAGS@
-BINARY=task-controller
-installdir = @prefix@
-
-testdir = tests
-TESTOBJS=${testdir}/test-task-controller.o task-controller.o configuration.o
-TESTBINARY=${testdir}/test-task-controller
-
-all: $(OBJS)
-	$(CC) $(CFLAG) -o $(BINARY) $(OBJS)
-
-main.o: main.c task-controller.h
-	$(CC) $(CFLAG) -o main.o -c main.c
-
-task-controller.o: task-controller.c task-controller.h 
-	$(CC) $(CFLAG) -o task-controller.o -c task-controller.c
-
-configuration.o: configuration.h configuration.c
-	$(CC) $(CFLAG) -o configuration.o -c configuration.c
-
-${testdir}/test-task-controller.o: task-controller.c task-controller.h
-	$(CC) $(CFLAG) -o ${testdir}/test-task-controller.o -c ${testdir}/test-task-controller.c
-
-test: $(TESTOBJS)
-	$(CC) $(CFLAG) -o $(TESTBINARY) $(TESTOBJS)
-
-clean:
-	rm -rf $(BINARY) $(OBJS) $(TESTOBJS)
-
-install: all
-	cp $(BINARY) $(installdir)
-
-uninstall:
-	rm -rf $(installdir)/$(BINARY)
-	rm -rf $(BINARY)

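One defect visible in the deleted makefile: the compile and link rules all reference $(CFLAG), while only CFLAGS is ever assigned (from @CFLAGS@), so the flags chosen by configure were silently dropped. Hand-maintained inconsistencies like this are exactly what the switch to an automake-generated makefile removes.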
+ 0 - 5237
src/c++/task-controller/configure

@@ -1,5237 +0,0 @@
-#! /bin/sh
-# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.61 for task-controller 0.1.
-#
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-# 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
-# This configure script is free software; the Free Software Foundation
-# gives unlimited permission to copy, distribute and modify it.
-## --------------------- ##
-## M4sh Initialization.  ##
-## --------------------- ##
-
-# Be more Bourne compatible
-DUALCASE=1; export DUALCASE # for MKS sh
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
-  emulate sh
-  NULLCMD=:
-  # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '${1+"$@"}'='"$@"'
-  setopt NO_GLOB_SUBST
-else
-  case `(set -o) 2>/dev/null` in
-  *posix*) set -o posix ;;
-esac
-
-fi
-
-
-
-
-# PATH needs CR
-# Avoid depending upon Character Ranges.
-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
-as_cr_digits='0123456789'
-as_cr_alnum=$as_cr_Letters$as_cr_digits
-
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
-  echo "#! /bin/sh" >conf$$.sh
-  echo  "exit 0"   >>conf$$.sh
-  chmod +x conf$$.sh
-  if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
-    PATH_SEPARATOR=';'
-  else
-    PATH_SEPARATOR=:
-  fi
-  rm -f conf$$.sh
-fi
-
-# Support unset when possible.
-if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
-  as_unset=unset
-else
-  as_unset=false
-fi
-
-
-# IFS
-# We need space, tab and new line, in precisely that order.  Quoting is
-# there to prevent editors from complaining about space-tab.
-# (If _AS_PATH_WALK were called with IFS unset, it would disable word
-# splitting by setting IFS to empty value.)
-as_nl='
-'
-IFS=" ""	$as_nl"
-
-# Find who we are.  Look in the path if we contain no directory separator.
-case $0 in
-  *[\\/]* ) as_myself=$0 ;;
-  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-done
-IFS=$as_save_IFS
-
-     ;;
-esac
-# We did not find ourselves, most probably we were run as `sh COMMAND'
-# in which case we are not to be found in the path.
-if test "x$as_myself" = x; then
-  as_myself=$0
-fi
-if test ! -f "$as_myself"; then
-  echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
-  { (exit 1); exit 1; }
-fi
-
-# Work around bugs in pre-3.0 UWIN ksh.
-for as_var in ENV MAIL MAILPATH
-do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
-done
-PS1='$ '
-PS2='> '
-PS4='+ '
-
-# NLS nuisances.
-for as_var in \
-  LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
-  LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
-  LC_TELEPHONE LC_TIME
-do
-  if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
-    eval $as_var=C; export $as_var
-  else
-    ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
-  fi
-done
-
-# Required to use basename.
-if expr a : '\(a\)' >/dev/null 2>&1 &&
-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
-  as_expr=expr
-else
-  as_expr=false
-fi
-
-if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
-  as_basename=basename
-else
-  as_basename=false
-fi
-
-
-# Name of the executable.
-as_me=`$as_basename -- "$0" ||
-$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
-	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-echo X/"$0" |
-    sed '/^.*\/\([^/][^/]*\)\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-
-# CDPATH.
-$as_unset CDPATH
-
-
-if test "x$CONFIG_SHELL" = x; then
-  if (eval ":") 2>/dev/null; then
-  as_have_required=yes
-else
-  as_have_required=no
-fi
-
-  if test $as_have_required = yes && 	 (eval ":
-(as_func_return () {
-  (exit \$1)
-}
-as_func_success () {
-  as_func_return 0
-}
-as_func_failure () {
-  as_func_return 1
-}
-as_func_ret_success () {
-  return 0
-}
-as_func_ret_failure () {
-  return 1
-}
-
-exitcode=0
-if as_func_success; then
-  :
-else
-  exitcode=1
-  echo as_func_success failed.
-fi
-
-if as_func_failure; then
-  exitcode=1
-  echo as_func_failure succeeded.
-fi
-
-if as_func_ret_success; then
-  :
-else
-  exitcode=1
-  echo as_func_ret_success failed.
-fi
-
-if as_func_ret_failure; then
-  exitcode=1
-  echo as_func_ret_failure succeeded.
-fi
-
-if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
-  :
-else
-  exitcode=1
-  echo positional parameters were not saved.
-fi
-
-test \$exitcode = 0) || { (exit 1); exit 1; }
-
-(
-  as_lineno_1=\$LINENO
-  as_lineno_2=\$LINENO
-  test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" &&
-  test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; }
-") 2> /dev/null; then
-  :
-else
-  as_candidate_shells=
-    as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  case $as_dir in
-	 /*)
-	   for as_base in sh bash ksh sh5; do
-	     as_candidate_shells="$as_candidate_shells $as_dir/$as_base"
-	   done;;
-       esac
-done
-IFS=$as_save_IFS
-
-
-      for as_shell in $as_candidate_shells $SHELL; do
-	 # Try only shells that exist, to save several forks.
-	 if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
-		{ ("$as_shell") 2> /dev/null <<\_ASEOF
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
-  emulate sh
-  NULLCMD=:
-  # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '${1+"$@"}'='"$@"'
-  setopt NO_GLOB_SUBST
-else
-  case `(set -o) 2>/dev/null` in
-  *posix*) set -o posix ;;
-esac
-
-fi
-
-
-:
-_ASEOF
-}; then
-  CONFIG_SHELL=$as_shell
-	       as_have_required=yes
-	       if { "$as_shell" 2> /dev/null <<\_ASEOF
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
-  emulate sh
-  NULLCMD=:
-  # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '${1+"$@"}'='"$@"'
-  setopt NO_GLOB_SUBST
-else
-  case `(set -o) 2>/dev/null` in
-  *posix*) set -o posix ;;
-esac
-
-fi
-
-
-:
-(as_func_return () {
-  (exit $1)
-}
-as_func_success () {
-  as_func_return 0
-}
-as_func_failure () {
-  as_func_return 1
-}
-as_func_ret_success () {
-  return 0
-}
-as_func_ret_failure () {
-  return 1
-}
-
-exitcode=0
-if as_func_success; then
-  :
-else
-  exitcode=1
-  echo as_func_success failed.
-fi
-
-if as_func_failure; then
-  exitcode=1
-  echo as_func_failure succeeded.
-fi
-
-if as_func_ret_success; then
-  :
-else
-  exitcode=1
-  echo as_func_ret_success failed.
-fi
-
-if as_func_ret_failure; then
-  exitcode=1
-  echo as_func_ret_failure succeeded.
-fi
-
-if ( set x; as_func_ret_success y && test x = "$1" ); then
-  :
-else
-  exitcode=1
-  echo positional parameters were not saved.
-fi
-
-test $exitcode = 0) || { (exit 1); exit 1; }
-
-(
-  as_lineno_1=$LINENO
-  as_lineno_2=$LINENO
-  test "x$as_lineno_1" != "x$as_lineno_2" &&
-  test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; }
-
-_ASEOF
-}; then
-  break
-fi
-
-fi
-
-      done
-
-      if test "x$CONFIG_SHELL" != x; then
-  for as_var in BASH_ENV ENV
-        do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
-        done
-        export CONFIG_SHELL
-        exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
-fi
-
-
-    if test $as_have_required = no; then
-  echo This script requires a shell more modern than all the
-      echo shells that I found on your system.  Please install a
-      echo modern shell, or manually run the script under such a
-      echo shell if you do have one.
-      { (exit 1); exit 1; }
-fi
-
-
-fi
-
-fi
-
-
-
-(eval "as_func_return () {
-  (exit \$1)
-}
-as_func_success () {
-  as_func_return 0
-}
-as_func_failure () {
-  as_func_return 1
-}
-as_func_ret_success () {
-  return 0
-}
-as_func_ret_failure () {
-  return 1
-}
-
-exitcode=0
-if as_func_success; then
-  :
-else
-  exitcode=1
-  echo as_func_success failed.
-fi
-
-if as_func_failure; then
-  exitcode=1
-  echo as_func_failure succeeded.
-fi
-
-if as_func_ret_success; then
-  :
-else
-  exitcode=1
-  echo as_func_ret_success failed.
-fi
-
-if as_func_ret_failure; then
-  exitcode=1
-  echo as_func_ret_failure succeeded.
-fi
-
-if ( set x; as_func_ret_success y && test x = \"\$1\" ); then
-  :
-else
-  exitcode=1
-  echo positional parameters were not saved.
-fi
-
-test \$exitcode = 0") || {
-  echo No shell found that supports shell functions.
-  echo Please tell autoconf@gnu.org about your system,
-  echo including any error possibly output before this
-  echo message
-}
-
-
-
-  as_lineno_1=$LINENO
-  as_lineno_2=$LINENO
-  test "x$as_lineno_1" != "x$as_lineno_2" &&
-  test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
-
-  # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
-  # uniformly replaced by the line number.  The first 'sed' inserts a
-  # line-number line after each line using $LINENO; the second 'sed'
-  # does the real work.  The second script uses 'N' to pair each
-  # line-number line with the line containing $LINENO, and appends
-  # trailing '-' during substitution so that $LINENO is not a special
-  # case at line end.
-  # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
-  # scripts with optimization help from Paolo Bonzini.  Blame Lee
-  # E. McMahon (1931-1989) for sed's syntax.  :-)
-  sed -n '
-    p
-    /[$]LINENO/=
-  ' <$as_myself |
-    sed '
-      s/[$]LINENO.*/&-/
-      t lineno
-      b
-      :lineno
-      N
-      :loop
-      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
-      t loop
-      s/-\n.*//
-    ' >$as_me.lineno &&
-  chmod +x "$as_me.lineno" ||
-    { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
-   { (exit 1); exit 1; }; }
-
-  # Don't try to exec as it changes $[0], causing all sort of problems
-  # (the dirname of $[0] is not the place where we might find the
-  # original and so on.  Autoconf is especially sensitive to this).
-  . "./$as_me.lineno"
-  # Exit status is that of the last command.
-  exit
-}
-
-
-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
-  as_dirname=dirname
-else
-  as_dirname=false
-fi
-
-ECHO_C= ECHO_N= ECHO_T=
-case `echo -n x` in
--n*)
-  case `echo 'x\c'` in
-  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
-  *)   ECHO_C='\c';;
-  esac;;
-*)
-  ECHO_N='-n';;
-esac
-
-if expr a : '\(a\)' >/dev/null 2>&1 &&
-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
-  as_expr=expr
-else
-  as_expr=false
-fi
-
-rm -f conf$$ conf$$.exe conf$$.file
-if test -d conf$$.dir; then
-  rm -f conf$$.dir/conf$$.file
-else
-  rm -f conf$$.dir
-  mkdir conf$$.dir
-fi
-echo >conf$$.file
-if ln -s conf$$.file conf$$ 2>/dev/null; then
-  as_ln_s='ln -s'
-  # ... but there are two gotchas:
-  # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
-  # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
-  # In both cases, we have to default to `cp -p'.
-  ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
-    as_ln_s='cp -p'
-elif ln conf$$.file conf$$ 2>/dev/null; then
-  as_ln_s=ln
-else
-  as_ln_s='cp -p'
-fi
-rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
-rmdir conf$$.dir 2>/dev/null
-
-if mkdir -p . 2>/dev/null; then
-  as_mkdir_p=:
-else
-  test -d ./-p && rmdir ./-p
-  as_mkdir_p=false
-fi
-
-if test -x / >/dev/null 2>&1; then
-  as_test_x='test -x'
-else
-  if ls -dL / >/dev/null 2>&1; then
-    as_ls_L_option=L
-  else
-    as_ls_L_option=
-  fi
-  as_test_x='
-    eval sh -c '\''
-      if test -d "$1"; then
-        test -d "$1/.";
-      else
-	case $1 in
-        -*)set "./$1";;
-	esac;
-	case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
-	???[sx]*):;;*)false;;esac;fi
-    '\'' sh
-  '
-fi
-as_executable_p=$as_test_x
-
-# Sed expression to map a string onto a valid CPP name.
-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
-
-# Sed expression to map a string onto a valid variable name.
-as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
-
-
-
-exec 7<&0 </dev/null 6>&1
-
-# Name of the host.
-# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
-# so uname gets run too.
-ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
-
-#
-# Initializations.
-#
-ac_default_prefix=/usr/local
-ac_clean_files=
-ac_config_libobj_dir=.
-LIBOBJS=
-cross_compiling=no
-subdirs=
-MFLAGS=
-MAKEFLAGS=
-SHELL=${CONFIG_SHELL-/bin/sh}
-
-# Identity of this package.
-PACKAGE_NAME='task-controller'
-PACKAGE_TARNAME='task-controller'
-PACKAGE_VERSION='0.1'
-PACKAGE_STRING='task-controller 0.1'
-PACKAGE_BUGREPORT=''
-
-ac_default_prefix=.
-ac_unique_file="task-controller.h"
-# Factoring default headers for most tests.
-ac_includes_default="\
-#include <stdio.h>
-#ifdef HAVE_SYS_TYPES_H
-# include <sys/types.h>
-#endif
-#ifdef HAVE_SYS_STAT_H
-# include <sys/stat.h>
-#endif
-#ifdef STDC_HEADERS
-# include <stdlib.h>
-# include <stddef.h>
-#else
-# ifdef HAVE_STDLIB_H
-#  include <stdlib.h>
-# endif
-#endif
-#ifdef HAVE_STRING_H
-# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
-#  include <memory.h>
-# endif
-# include <string.h>
-#endif
-#ifdef HAVE_STRINGS_H
-# include <strings.h>
-#endif
-#ifdef HAVE_INTTYPES_H
-# include <inttypes.h>
-#endif
-#ifdef HAVE_STDINT_H
-# include <stdint.h>
-#endif
-#ifdef HAVE_UNISTD_H
-# include <unistd.h>
-#endif"
-
-ac_subst_vars='SHELL
-PATH_SEPARATOR
-PACKAGE_NAME
-PACKAGE_TARNAME
-PACKAGE_VERSION
-PACKAGE_STRING
-PACKAGE_BUGREPORT
-exec_prefix
-prefix
-program_transform_name
-bindir
-sbindir
-libexecdir
-datarootdir
-datadir
-sysconfdir
-sharedstatedir
-localstatedir
-includedir
-oldincludedir
-docdir
-infodir
-htmldir
-dvidir
-pdfdir
-psdir
-libdir
-localedir
-mandir
-DEFS
-ECHO_C
-ECHO_N
-ECHO_T
-LIBS
-build_alias
-host_alias
-target_alias
-CC
-CFLAGS
-LDFLAGS
-CPPFLAGS
-ac_ct_CC
-EXEEXT
-OBJEXT
-CPP
-GREP
-EGREP
-LIBOBJS
-LTLIBOBJS'
-ac_subst_files=''
-      ac_precious_vars='build_alias
-host_alias
-target_alias
-CC
-CFLAGS
-LDFLAGS
-LIBS
-CPPFLAGS
-CPP'
-
-
-# Initialize some variables set by options.
-ac_init_help=
-ac_init_version=false
-# The variables have the same names as the options, with
-# dashes changed to underlines.
-cache_file=/dev/null
-exec_prefix=NONE
-no_create=
-no_recursion=
-prefix=NONE
-program_prefix=NONE
-program_suffix=NONE
-program_transform_name=s,x,x,
-silent=
-site=
-srcdir=
-verbose=
-x_includes=NONE
-x_libraries=NONE
-
-# Installation directory options.
-# These are left unexpanded so users can "make install exec_prefix=/foo"
-# and all the variables that are supposed to be based on exec_prefix
-# by default will actually change.
-# Use braces instead of parens because sh, perl, etc. also accept them.
-# (The list follows the same order as the GNU Coding Standards.)
-bindir='${exec_prefix}/bin'
-sbindir='${exec_prefix}/sbin'
-libexecdir='${exec_prefix}/libexec'
-datarootdir='${prefix}/share'
-datadir='${datarootdir}'
-sysconfdir='${prefix}/etc'
-sharedstatedir='${prefix}/com'
-localstatedir='${prefix}/var'
-includedir='${prefix}/include'
-oldincludedir='/usr/include'
-docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
-infodir='${datarootdir}/info'
-htmldir='${docdir}'
-dvidir='${docdir}'
-pdfdir='${docdir}'
-psdir='${docdir}'
-libdir='${exec_prefix}/lib'
-localedir='${datarootdir}/locale'
-mandir='${datarootdir}/man'
-
-ac_prev=
-ac_dashdash=
-for ac_option
-do
-  # If the previous option needs an argument, assign it.
-  if test -n "$ac_prev"; then
-    eval $ac_prev=\$ac_option
-    ac_prev=
-    continue
-  fi
-
-  case $ac_option in
-  *=*)	ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
-  *)	ac_optarg=yes ;;
-  esac
-
-  # Accept the important Cygnus configure options, so we can diagnose typos.
-
-  case $ac_dashdash$ac_option in
-  --)
-    ac_dashdash=yes ;;
-
-  -bindir | --bindir | --bindi | --bind | --bin | --bi)
-    ac_prev=bindir ;;
-  -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
-    bindir=$ac_optarg ;;
-
-  -build | --build | --buil | --bui | --bu)
-    ac_prev=build_alias ;;
-  -build=* | --build=* | --buil=* | --bui=* | --bu=*)
-    build_alias=$ac_optarg ;;
-
-  -cache-file | --cache-file | --cache-fil | --cache-fi \
-  | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
-    ac_prev=cache_file ;;
-  -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
-  | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
-    cache_file=$ac_optarg ;;
-
-  --config-cache | -C)
-    cache_file=config.cache ;;
-
-  -datadir | --datadir | --datadi | --datad)
-    ac_prev=datadir ;;
-  -datadir=* | --datadir=* | --datadi=* | --datad=*)
-    datadir=$ac_optarg ;;
-
-  -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
-  | --dataroo | --dataro | --datar)
-    ac_prev=datarootdir ;;
-  -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
-  | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
-    datarootdir=$ac_optarg ;;
-
-  -disable-* | --disable-*)
-    ac_feature=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null &&
-      { echo "$as_me: error: invalid feature name: $ac_feature" >&2
-   { (exit 1); exit 1; }; }
-    ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'`
-    eval enable_$ac_feature=no ;;
-
-  -docdir | --docdir | --docdi | --doc | --do)
-    ac_prev=docdir ;;
-  -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
-    docdir=$ac_optarg ;;
-
-  -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
-    ac_prev=dvidir ;;
-  -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
-    dvidir=$ac_optarg ;;
-
-  -enable-* | --enable-*)
-    ac_feature=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_feature" : ".*[^-._$as_cr_alnum]" >/dev/null &&
-      { echo "$as_me: error: invalid feature name: $ac_feature" >&2
-   { (exit 1); exit 1; }; }
-    ac_feature=`echo $ac_feature | sed 's/[-.]/_/g'`
-    eval enable_$ac_feature=\$ac_optarg ;;
-
-  -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
-  | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
-  | --exec | --exe | --ex)
-    ac_prev=exec_prefix ;;
-  -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
-  | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
-  | --exec=* | --exe=* | --ex=*)
-    exec_prefix=$ac_optarg ;;
-
-  -gas | --gas | --ga | --g)
-    # Obsolete; use --with-gas.
-    with_gas=yes ;;
-
-  -help | --help | --hel | --he | -h)
-    ac_init_help=long ;;
-  -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
-    ac_init_help=recursive ;;
-  -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
-    ac_init_help=short ;;
-
-  -host | --host | --hos | --ho)
-    ac_prev=host_alias ;;
-  -host=* | --host=* | --hos=* | --ho=*)
-    host_alias=$ac_optarg ;;
-
-  -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
-    ac_prev=htmldir ;;
-  -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
-  | --ht=*)
-    htmldir=$ac_optarg ;;
-
-  -includedir | --includedir | --includedi | --included | --include \
-  | --includ | --inclu | --incl | --inc)
-    ac_prev=includedir ;;
-  -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
-  | --includ=* | --inclu=* | --incl=* | --inc=*)
-    includedir=$ac_optarg ;;
-
-  -infodir | --infodir | --infodi | --infod | --info | --inf)
-    ac_prev=infodir ;;
-  -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
-    infodir=$ac_optarg ;;
-
-  -libdir | --libdir | --libdi | --libd)
-    ac_prev=libdir ;;
-  -libdir=* | --libdir=* | --libdi=* | --libd=*)
-    libdir=$ac_optarg ;;
-
-  -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
-  | --libexe | --libex | --libe)
-    ac_prev=libexecdir ;;
-  -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
-  | --libexe=* | --libex=* | --libe=*)
-    libexecdir=$ac_optarg ;;
-
-  -localedir | --localedir | --localedi | --localed | --locale)
-    ac_prev=localedir ;;
-  -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
-    localedir=$ac_optarg ;;
-
-  -localstatedir | --localstatedir | --localstatedi | --localstated \
-  | --localstate | --localstat | --localsta | --localst | --locals)
-    ac_prev=localstatedir ;;
-  -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
-  | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
-    localstatedir=$ac_optarg ;;
-
-  -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
-    ac_prev=mandir ;;
-  -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
-    mandir=$ac_optarg ;;
-
-  -nfp | --nfp | --nf)
-    # Obsolete; use --without-fp.
-    with_fp=no ;;
-
-  -no-create | --no-create | --no-creat | --no-crea | --no-cre \
-  | --no-cr | --no-c | -n)
-    no_create=yes ;;
-
-  -no-recursion | --no-recursion | --no-recursio | --no-recursi \
-  | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
-    no_recursion=yes ;;
-
-  -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
-  | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
-  | --oldin | --oldi | --old | --ol | --o)
-    ac_prev=oldincludedir ;;
-  -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
-  | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
-  | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
-    oldincludedir=$ac_optarg ;;
-
-  -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
-    ac_prev=prefix ;;
-  -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
-    prefix=$ac_optarg ;;
-
-  -program-prefix | --program-prefix | --program-prefi | --program-pref \
-  | --program-pre | --program-pr | --program-p)
-    ac_prev=program_prefix ;;
-  -program-prefix=* | --program-prefix=* | --program-prefi=* \
-  | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
-    program_prefix=$ac_optarg ;;
-
-  -program-suffix | --program-suffix | --program-suffi | --program-suff \
-  | --program-suf | --program-su | --program-s)
-    ac_prev=program_suffix ;;
-  -program-suffix=* | --program-suffix=* | --program-suffi=* \
-  | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
-    program_suffix=$ac_optarg ;;
-
-  -program-transform-name | --program-transform-name \
-  | --program-transform-nam | --program-transform-na \
-  | --program-transform-n | --program-transform- \
-  | --program-transform | --program-transfor \
-  | --program-transfo | --program-transf \
-  | --program-trans | --program-tran \
-  | --progr-tra | --program-tr | --program-t)
-    ac_prev=program_transform_name ;;
-  -program-transform-name=* | --program-transform-name=* \
-  | --program-transform-nam=* | --program-transform-na=* \
-  | --program-transform-n=* | --program-transform-=* \
-  | --program-transform=* | --program-transfor=* \
-  | --program-transfo=* | --program-transf=* \
-  | --program-trans=* | --program-tran=* \
-  | --progr-tra=* | --program-tr=* | --program-t=*)
-    program_transform_name=$ac_optarg ;;
-
-  -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
-    ac_prev=pdfdir ;;
-  -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
-    pdfdir=$ac_optarg ;;
-
-  -psdir | --psdir | --psdi | --psd | --ps)
-    ac_prev=psdir ;;
-  -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
-    psdir=$ac_optarg ;;
-
-  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-  | -silent | --silent | --silen | --sile | --sil)
-    silent=yes ;;
-
-  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
-    ac_prev=sbindir ;;
-  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
-  | --sbi=* | --sb=*)
-    sbindir=$ac_optarg ;;
-
-  -sharedstatedir | --sharedstatedir | --sharedstatedi \
-  | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
-  | --sharedst | --shareds | --shared | --share | --shar \
-  | --sha | --sh)
-    ac_prev=sharedstatedir ;;
-  -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
-  | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
-  | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
-  | --sha=* | --sh=*)
-    sharedstatedir=$ac_optarg ;;
-
-  -site | --site | --sit)
-    ac_prev=site ;;
-  -site=* | --site=* | --sit=*)
-    site=$ac_optarg ;;
-
-  -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
-    ac_prev=srcdir ;;
-  -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
-    srcdir=$ac_optarg ;;
-
-  -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
-  | --syscon | --sysco | --sysc | --sys | --sy)
-    ac_prev=sysconfdir ;;
-  -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
-  | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
-    sysconfdir=$ac_optarg ;;
-
-  -target | --target | --targe | --targ | --tar | --ta | --t)
-    ac_prev=target_alias ;;
-  -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
-    target_alias=$ac_optarg ;;
-
-  -v | -verbose | --verbose | --verbos | --verbo | --verb)
-    verbose=yes ;;
-
-  -version | --version | --versio | --versi | --vers | -V)
-    ac_init_version=: ;;
-
-  -with-* | --with-*)
-    ac_package=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null &&
-      { echo "$as_me: error: invalid package name: $ac_package" >&2
-   { (exit 1); exit 1; }; }
-    ac_package=`echo $ac_package | sed 's/[-.]/_/g'`
-    eval with_$ac_package=\$ac_optarg ;;
-
-  -without-* | --without-*)
-    ac_package=`expr "x$ac_option" : 'x-*without-\(.*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_package" : ".*[^-._$as_cr_alnum]" >/dev/null &&
-      { echo "$as_me: error: invalid package name: $ac_package" >&2
-   { (exit 1); exit 1; }; }
-    ac_package=`echo $ac_package | sed 's/[-.]/_/g'`
-    eval with_$ac_package=no ;;
-
-  --x)
-    # Obsolete; use --with-x.
-    with_x=yes ;;
-
-  -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
-  | --x-incl | --x-inc | --x-in | --x-i)
-    ac_prev=x_includes ;;
-  -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
-  | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
-    x_includes=$ac_optarg ;;
-
-  -x-libraries | --x-libraries | --x-librarie | --x-librari \
-  | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
-    ac_prev=x_libraries ;;
-  -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
-  | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
-    x_libraries=$ac_optarg ;;
-
-  -*) { echo "$as_me: error: unrecognized option: $ac_option
-Try \`$0 --help' for more information." >&2
-   { (exit 1); exit 1; }; }
-    ;;
-
-  *=*)
-    ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null &&
-      { echo "$as_me: error: invalid variable name: $ac_envvar" >&2
-   { (exit 1); exit 1; }; }
-    eval $ac_envvar=\$ac_optarg
-    export $ac_envvar ;;
-
-  *)
-    # FIXME: should be removed in autoconf 3.0.
-    echo "$as_me: WARNING: you should use --build, --host, --target" >&2
-    expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
-      echo "$as_me: WARNING: invalid host type: $ac_option" >&2
-    : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
-    ;;
-
-  esac
-done
-
-if test -n "$ac_prev"; then
-  ac_option=--`echo $ac_prev | sed 's/_/-/g'`
-  { echo "$as_me: error: missing argument to $ac_option" >&2
-   { (exit 1); exit 1; }; }
-fi
-
-# Be sure to have absolute directory names.
-for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
-		datadir sysconfdir sharedstatedir localstatedir includedir \
-		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
-		libdir localedir mandir
-do
-  eval ac_val=\$$ac_var
-  case $ac_val in
-    [\\/$]* | ?:[\\/]* )  continue;;
-    NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
-  esac
-  { echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2
-   { (exit 1); exit 1; }; }
-done
-
-# There might be people who depend on the old broken behavior: `$host'
-# used to hold the argument of --host etc.
-# FIXME: To remove some day.
-build=$build_alias
-host=$host_alias
-target=$target_alias
-
-# FIXME: To remove some day.
-if test "x$host_alias" != x; then
-  if test "x$build_alias" = x; then
-    cross_compiling=maybe
-    echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
-    If a cross compiler is detected then cross compile mode will be used." >&2
-  elif test "x$build_alias" != "x$host_alias"; then
-    cross_compiling=yes
-  fi
-fi
-
-ac_tool_prefix=
-test -n "$host_alias" && ac_tool_prefix=$host_alias-
-
-test "$silent" = yes && exec 6>/dev/null
-
-
-ac_pwd=`pwd` && test -n "$ac_pwd" &&
-ac_ls_di=`ls -di .` &&
-ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
-  { echo "$as_me: error: Working directory cannot be determined" >&2
-   { (exit 1); exit 1; }; }
-test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
-  { echo "$as_me: error: pwd does not report name of working directory" >&2
-   { (exit 1); exit 1; }; }
-
-
-# Find the source files, if location was not specified.
-if test -z "$srcdir"; then
-  ac_srcdir_defaulted=yes
-  # Try the directory containing this script, then the parent directory.
-  ac_confdir=`$as_dirname -- "$0" ||
-$as_expr X"$0" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$0" : 'X\(//\)[^/]' \| \
-	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-echo X"$0" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-  srcdir=$ac_confdir
-  if test ! -r "$srcdir/$ac_unique_file"; then
-    srcdir=..
-  fi
-else
-  ac_srcdir_defaulted=no
-fi
-if test ! -r "$srcdir/$ac_unique_file"; then
-  test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
-  { echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2
-   { (exit 1); exit 1; }; }
-fi
-ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
-ac_abs_confdir=`(
-	cd "$srcdir" && test -r "./$ac_unique_file" || { echo "$as_me: error: $ac_msg" >&2
-   { (exit 1); exit 1; }; }
-	pwd)`
-# When building in place, set srcdir=.
-if test "$ac_abs_confdir" = "$ac_pwd"; then
-  srcdir=.
-fi
-# Remove unnecessary trailing slashes from srcdir.
-# Double slashes in file names in object file debugging info
-# mess up M-x gdb in Emacs.
-case $srcdir in
-*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
-esac
-for ac_var in $ac_precious_vars; do
-  eval ac_env_${ac_var}_set=\${${ac_var}+set}
-  eval ac_env_${ac_var}_value=\$${ac_var}
-  eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
-  eval ac_cv_env_${ac_var}_value=\$${ac_var}
-done
-
-#
-# Report the --help message.
-#
-if test "$ac_init_help" = "long"; then
-  # Omit some internal or obsolete options to make the list less imposing.
-  # This message is too long to be a string in the A/UX 3.1 sh.
-  cat <<_ACEOF
-\`configure' configures task-controller 0.1 to adapt to many kinds of systems.
-
-Usage: $0 [OPTION]... [VAR=VALUE]...
-
-To assign environment variables (e.g., CC, CFLAGS...), specify them as
-VAR=VALUE.  See below for descriptions of some of the useful variables.
-
-Defaults for the options are specified in brackets.
-
-Configuration:
-  -h, --help              display this help and exit
-      --help=short        display options specific to this package
-      --help=recursive    display the short help of all the included packages
-  -V, --version           display version information and exit
-  -q, --quiet, --silent   do not print \`checking...' messages
-      --cache-file=FILE   cache test results in FILE [disabled]
-  -C, --config-cache      alias for \`--cache-file=config.cache'
-  -n, --no-create         do not create output files
-      --srcdir=DIR        find the sources in DIR [configure dir or \`..']
-
-Installation directories:
-  --prefix=PREFIX         install architecture-independent files in PREFIX
-			  [$ac_default_prefix]
-  --exec-prefix=EPREFIX   install architecture-dependent files in EPREFIX
-			  [PREFIX]
-
-By default, \`make install' will install all the files in
-\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc.  You can specify
-an installation prefix other than \`$ac_default_prefix' using \`--prefix',
-for instance \`--prefix=\$HOME'.
-
-For better control, use the options below.
-
-Fine tuning of the installation directories:
-  --bindir=DIR           user executables [EPREFIX/bin]
-  --sbindir=DIR          system admin executables [EPREFIX/sbin]
-  --libexecdir=DIR       program executables [EPREFIX/libexec]
-  --sysconfdir=DIR       read-only single-machine data [PREFIX/etc]
-  --sharedstatedir=DIR   modifiable architecture-independent data [PREFIX/com]
-  --localstatedir=DIR    modifiable single-machine data [PREFIX/var]
-  --libdir=DIR           object code libraries [EPREFIX/lib]
-  --includedir=DIR       C header files [PREFIX/include]
-  --oldincludedir=DIR    C header files for non-gcc [/usr/include]
-  --datarootdir=DIR      read-only arch.-independent data root [PREFIX/share]
-  --datadir=DIR          read-only architecture-independent data [DATAROOTDIR]
-  --infodir=DIR          info documentation [DATAROOTDIR/info]
-  --localedir=DIR        locale-dependent data [DATAROOTDIR/locale]
-  --mandir=DIR           man documentation [DATAROOTDIR/man]
-  --docdir=DIR           documentation root [DATAROOTDIR/doc/task-controller]
-  --htmldir=DIR          html documentation [DOCDIR]
-  --dvidir=DIR           dvi documentation [DOCDIR]
-  --pdfdir=DIR           pdf documentation [DOCDIR]
-  --psdir=DIR            ps documentation [DOCDIR]
-_ACEOF
-
-  cat <<\_ACEOF
-_ACEOF
-fi
-
-if test -n "$ac_init_help"; then
-  case $ac_init_help in
-     short | recursive ) echo "Configuration of task-controller 0.1:";;
-   esac
-  cat <<\_ACEOF
-
-Optional Packages:
-  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
-  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
---with-confdir path to hadoop conf dir
-
-Some influential environment variables:
-  CC          C compiler command
-  CFLAGS      C compiler flags
-  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
-              nonstandard directory <lib dir>
-  LIBS        libraries to pass to the linker, e.g. -l<library>
-  CPPFLAGS    C/C++/Objective C preprocessor flags, e.g. -I<include dir> if
-              you have headers in a nonstandard directory <include dir>
-  CPP         C preprocessor
-
-Use these variables to override the choices made by `configure' or to help
-it to find libraries and programs with nonstandard names/locations.
-
-_ACEOF
-ac_status=$?
-fi
-
-if test "$ac_init_help" = "recursive"; then
-  # If there are subdirs, report their specific --help.
-  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
-    test -d "$ac_dir" || continue
-    ac_builddir=.
-
-case "$ac_dir" in
-.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
-*)
-  ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
-  # A ".." for each directory in $ac_dir_suffix.
-  ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'`
-  case $ac_top_builddir_sub in
-  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
-  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
-  esac ;;
-esac
-ac_abs_top_builddir=$ac_pwd
-ac_abs_builddir=$ac_pwd$ac_dir_suffix
-# for backward compatibility:
-ac_top_builddir=$ac_top_build_prefix
-
-case $srcdir in
-  .)  # We are building in place.
-    ac_srcdir=.
-    ac_top_srcdir=$ac_top_builddir_sub
-    ac_abs_top_srcdir=$ac_pwd ;;
-  [\\/]* | ?:[\\/]* )  # Absolute name.
-    ac_srcdir=$srcdir$ac_dir_suffix;
-    ac_top_srcdir=$srcdir
-    ac_abs_top_srcdir=$srcdir ;;
-  *) # Relative name.
-    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
-    ac_top_srcdir=$ac_top_build_prefix$srcdir
-    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
-esac
-ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
-
-    cd "$ac_dir" || { ac_status=$?; continue; }
-    # Check for guested configure.
-    if test -f "$ac_srcdir/configure.gnu"; then
-      echo &&
-      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
-    elif test -f "$ac_srcdir/configure"; then
-      echo &&
-      $SHELL "$ac_srcdir/configure" --help=recursive
-    else
-      echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
-    fi || ac_status=$?
-    cd "$ac_pwd" || { ac_status=$?; break; }
-  done
-fi
-
-test -n "$ac_init_help" && exit $ac_status
-if $ac_init_version; then
-  cat <<\_ACEOF
-task-controller configure 0.1
-generated by GNU Autoconf 2.61
-
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
-This configure script is free software; the Free Software Foundation
-gives unlimited permission to copy, distribute and modify it.
-_ACEOF
-  exit
-fi
-cat >config.log <<_ACEOF
-This file contains any messages produced by compilers while
-running configure, to aid debugging if configure makes a mistake.
-
-It was created by task-controller $as_me 0.1, which was
-generated by GNU Autoconf 2.61.  Invocation command line was
-
-  $ $0 $@
-
-_ACEOF
-exec 5>>config.log
-{
-cat <<_ASUNAME
-## --------- ##
-## Platform. ##
-## --------- ##
-
-hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
-uname -m = `(uname -m) 2>/dev/null || echo unknown`
-uname -r = `(uname -r) 2>/dev/null || echo unknown`
-uname -s = `(uname -s) 2>/dev/null || echo unknown`
-uname -v = `(uname -v) 2>/dev/null || echo unknown`
-
-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
-/bin/uname -X     = `(/bin/uname -X) 2>/dev/null     || echo unknown`
-
-/bin/arch              = `(/bin/arch) 2>/dev/null              || echo unknown`
-/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null       || echo unknown`
-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
-/usr/bin/hostinfo      = `(/usr/bin/hostinfo) 2>/dev/null      || echo unknown`
-/bin/machine           = `(/bin/machine) 2>/dev/null           || echo unknown`
-/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null       || echo unknown`
-/bin/universe          = `(/bin/universe) 2>/dev/null          || echo unknown`
-
-_ASUNAME
-
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  echo "PATH: $as_dir"
-done
-IFS=$as_save_IFS
-
-} >&5
-
-cat >&5 <<_ACEOF
-
-
-## ----------- ##
-## Core tests. ##
-## ----------- ##
-
-_ACEOF
-
-
-# Keep a trace of the command line.
-# Strip out --no-create and --no-recursion so they do not pile up.
-# Strip out --silent because we don't want to record it for future runs.
-# Also quote any args containing shell meta-characters.
-# Make two passes to allow for proper duplicate-argument suppression.
-ac_configure_args=
-ac_configure_args0=
-ac_configure_args1=
-ac_must_keep_next=false
-for ac_pass in 1 2
-do
-  for ac_arg
-  do
-    case $ac_arg in
-    -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
-    -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-    | -silent | --silent | --silen | --sile | --sil)
-      continue ;;
-    *\'*)
-      ac_arg=`echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
-    esac
-    case $ac_pass in
-    1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;;
-    2)
-      ac_configure_args1="$ac_configure_args1 '$ac_arg'"
-      if test $ac_must_keep_next = true; then
-	ac_must_keep_next=false # Got value, back to normal.
-      else
-	case $ac_arg in
-	  *=* | --config-cache | -C | -disable-* | --disable-* \
-	  | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
-	  | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
-	  | -with-* | --with-* | -without-* | --without-* | --x)
-	    case "$ac_configure_args0 " in
-	      "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
-	    esac
-	    ;;
-	  -* ) ac_must_keep_next=true ;;
-	esac
-      fi
-      ac_configure_args="$ac_configure_args '$ac_arg'"
-      ;;
-    esac
-  done
-done
-$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; }
-$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; }
-
-# When interrupted or exit'd, cleanup temporary files, and complete
-# config.log.  We remove comments because anyway the quotes in there
-# would cause problems or look ugly.
-# WARNING: Use '\'' to represent an apostrophe within the trap.
-# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
-trap 'exit_status=$?
-  # Save into config.log some information that might help in debugging.
-  {
-    echo
-
-    cat <<\_ASBOX
-## ---------------- ##
-## Cache variables. ##
-## ---------------- ##
-_ASBOX
-    echo
-    # The following way of writing the cache mishandles newlines in values,
-(
-  for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
-    eval ac_val=\$$ac_var
-    case $ac_val in #(
-    *${as_nl}*)
-      case $ac_var in #(
-      *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5
-echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
-      esac
-      case $ac_var in #(
-      _ | IFS | as_nl) ;; #(
-      *) $as_unset $ac_var ;;
-      esac ;;
-    esac
-  done
-  (set) 2>&1 |
-    case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
-    *${as_nl}ac_space=\ *)
-      sed -n \
-	"s/'\''/'\''\\\\'\'''\''/g;
-	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
-      ;; #(
-    *)
-      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
-      ;;
-    esac |
-    sort
-)
-    echo
-
-    cat <<\_ASBOX
-## ----------------- ##
-## Output variables. ##
-## ----------------- ##
-_ASBOX
-    echo
-    for ac_var in $ac_subst_vars
-    do
-      eval ac_val=\$$ac_var
-      case $ac_val in
-      *\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
-      esac
-      echo "$ac_var='\''$ac_val'\''"
-    done | sort
-    echo
-
-    if test -n "$ac_subst_files"; then
-      cat <<\_ASBOX
-## ------------------- ##
-## File substitutions. ##
-## ------------------- ##
-_ASBOX
-      echo
-      for ac_var in $ac_subst_files
-      do
-	eval ac_val=\$$ac_var
-	case $ac_val in
-	*\'\''*) ac_val=`echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
-	esac
-	echo "$ac_var='\''$ac_val'\''"
-      done | sort
-      echo
-    fi
-
-    if test -s confdefs.h; then
-      cat <<\_ASBOX
-## ----------- ##
-## confdefs.h. ##
-## ----------- ##
-_ASBOX
-      echo
-      cat confdefs.h
-      echo
-    fi
-    test "$ac_signal" != 0 &&
-      echo "$as_me: caught signal $ac_signal"
-    echo "$as_me: exit $exit_status"
-  } >&5
-  rm -f core *.core core.conftest.* &&
-    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
-    exit $exit_status
-' 0
-for ac_signal in 1 2 13 15; do
-  trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal
-done
-ac_signal=0
-
-# confdefs.h avoids OS command line length limits that DEFS can exceed.
-rm -f -r conftest* confdefs.h
-
-# Predefined preprocessor variables.
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_NAME "$PACKAGE_NAME"
-_ACEOF
-
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
-_ACEOF
-
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_VERSION "$PACKAGE_VERSION"
-_ACEOF
-
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_STRING "$PACKAGE_STRING"
-_ACEOF
-
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
-_ACEOF
-
-
-# Let the site file select an alternate cache file if it wants to.
-# Prefer explicitly selected file to automatically selected ones.
-if test -n "$CONFIG_SITE"; then
-  set x "$CONFIG_SITE"
-elif test "x$prefix" != xNONE; then
-  set x "$prefix/share/config.site" "$prefix/etc/config.site"
-else
-  set x "$ac_default_prefix/share/config.site" \
-	"$ac_default_prefix/etc/config.site"
-fi
-shift
-for ac_site_file
-do
-  if test -r "$ac_site_file"; then
-    { echo "$as_me:$LINENO: loading site script $ac_site_file" >&5
-echo "$as_me: loading site script $ac_site_file" >&6;}
-    sed 's/^/| /' "$ac_site_file" >&5
-    . "$ac_site_file"
-  fi
-done
-
-if test -r "$cache_file"; then
-  # Some versions of bash will fail to source /dev/null (special
-  # files actually), so we avoid doing that.
-  if test -f "$cache_file"; then
-    { echo "$as_me:$LINENO: loading cache $cache_file" >&5
-echo "$as_me: loading cache $cache_file" >&6;}
-    case $cache_file in
-      [\\/]* | ?:[\\/]* ) . "$cache_file";;
-      *)                      . "./$cache_file";;
-    esac
-  fi
-else
-  { echo "$as_me:$LINENO: creating cache $cache_file" >&5
-echo "$as_me: creating cache $cache_file" >&6;}
-  >$cache_file
-fi
-
-# Check that the precious variables saved in the cache have kept the same
-# value.
-ac_cache_corrupted=false
-for ac_var in $ac_precious_vars; do
-  eval ac_old_set=\$ac_cv_env_${ac_var}_set
-  eval ac_new_set=\$ac_env_${ac_var}_set
-  eval ac_old_val=\$ac_cv_env_${ac_var}_value
-  eval ac_new_val=\$ac_env_${ac_var}_value
-  case $ac_old_set,$ac_new_set in
-    set,)
-      { echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
-echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
-      ac_cache_corrupted=: ;;
-    ,set)
-      { echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5
-echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
-      ac_cache_corrupted=: ;;
-    ,);;
-    *)
-      if test "x$ac_old_val" != "x$ac_new_val"; then
-	{ echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5
-echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
-	{ echo "$as_me:$LINENO:   former value:  $ac_old_val" >&5
-echo "$as_me:   former value:  $ac_old_val" >&2;}
-	{ echo "$as_me:$LINENO:   current value: $ac_new_val" >&5
-echo "$as_me:   current value: $ac_new_val" >&2;}
-	ac_cache_corrupted=:
-      fi;;
-  esac
-  # Pass precious variables to config.status.
-  if test "$ac_new_set" = set; then
-    case $ac_new_val in
-    *\'*) ac_arg=$ac_var=`echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
-    *) ac_arg=$ac_var=$ac_new_val ;;
-    esac
-    case " $ac_configure_args " in
-      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
-      *) ac_configure_args="$ac_configure_args '$ac_arg'" ;;
-    esac
-  fi
-done
-if $ac_cache_corrupted; then
-  { echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5
-echo "$as_me: error: changes in the environment can compromise the build" >&2;}
-  { { echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5
-echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;}
-   { (exit 1); exit 1; }; }
-fi
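# A minimal sketch, assuming autoconf's standard cache options (paths
# illustrative): the cache loaded and validated above is exercised by
# re-running configure with caching enabled, and reset as the error
# message suggests when the environment changes:
#
#   ./configure -C      # --config-cache: record checks in config.cache
#   rm config.cache     # or `make distclean' before the next run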
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-
-# Change the default prefix to an empty string so that the binary does not
-# get installed into the system directories.
-
-
-# Add a new configure argument, --with-confdir.
-
-# Check whether --with-confdir was given.
-if test "${with_confdir+set}" = set; then
-  withval=$with_confdir;
-fi
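# A minimal invocation sketch, assuming the usual autoconf handling of
# --with options (the path below is illustrative):
#
#   ./configure --with-confdir=/etc/hadoop
#
# leaves the chosen path in $with_confdir for the HADOOP_CONF_DIR check
# further down.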
-
-
-ac_config_headers="$ac_config_headers configuration.h"
-
-
-# Checks for programs.
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-if test -n "$ac_tool_prefix"; then
-  # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
-set dummy ${ac_tool_prefix}gcc; ac_word=$2
-{ echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="${ac_tool_prefix}gcc"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { echo "$as_me:$LINENO: result: $CC" >&5
-echo "${ECHO_T}$CC" >&6; }
-else
-  { echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6; }
-fi
-
-
-fi
-if test -z "$ac_cv_prog_CC"; then
-  ac_ct_CC=$CC
-  # Extract the first word of "gcc", so it can be a program name with args.
-set dummy gcc; ac_word=$2
-{ echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if test -n "$ac_ct_CC"; then
-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_ac_ct_CC="gcc"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-done
-IFS=$as_save_IFS
-
-fi
-fi
-ac_ct_CC=$ac_cv_prog_ac_ct_CC
-if test -n "$ac_ct_CC"; then
-  { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
-echo "${ECHO_T}$ac_ct_CC" >&6; }
-else
-  { echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6; }
-fi
-
-  if test "x$ac_ct_CC" = x; then
-    CC=""
-  else
-    case $cross_compiling:$ac_tool_warned in
-yes:)
-{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools
-whose name does not start with the host triplet.  If you think this
-configuration is useful to you, please write to autoconf@gnu.org." >&5
-echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools
-whose name does not start with the host triplet.  If you think this
-configuration is useful to you, please write to autoconf@gnu.org." >&2;}
-ac_tool_warned=yes ;;
-esac
-    CC=$ac_ct_CC
-  fi
-else
-  CC="$ac_cv_prog_CC"
-fi
-
-if test -z "$CC"; then
-          if test -n "$ac_tool_prefix"; then
-    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
-set dummy ${ac_tool_prefix}cc; ac_word=$2
-{ echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="${ac_tool_prefix}cc"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { echo "$as_me:$LINENO: result: $CC" >&5
-echo "${ECHO_T}$CC" >&6; }
-else
-  { echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6; }
-fi
-
-
-  fi
-fi
-if test -z "$CC"; then
-  # Extract the first word of "cc", so it can be a program name with args.
-set dummy cc; ac_word=$2
-{ echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-  ac_prog_rejected=no
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
-       ac_prog_rejected=yes
-       continue
-     fi
-    ac_cv_prog_CC="cc"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-done
-IFS=$as_save_IFS
-
-if test $ac_prog_rejected = yes; then
-  # We found a bogon in the path, so make sure we never use it.
-  set dummy $ac_cv_prog_CC
-  shift
-  if test $# != 0; then
-    # We chose a different compiler from the bogus one.
-    # However, it has the same basename, so the bogon will be chosen
-    # first if we set CC to just the basename; use the full file name.
-    shift
-    ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
-  fi
-fi
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { echo "$as_me:$LINENO: result: $CC" >&5
-echo "${ECHO_T}$CC" >&6; }
-else
-  { echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6; }
-fi
-
-
-fi
-if test -z "$CC"; then
-  if test -n "$ac_tool_prefix"; then
-  for ac_prog in cl.exe
-  do
-    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
-set dummy $ac_tool_prefix$ac_prog; ac_word=$2
-{ echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { echo "$as_me:$LINENO: result: $CC" >&5
-echo "${ECHO_T}$CC" >&6; }
-else
-  { echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6; }
-fi
-
-
-    test -n "$CC" && break
-  done
-fi
-if test -z "$CC"; then
-  ac_ct_CC=$CC
-  for ac_prog in cl.exe
-do
-  # Extract the first word of "$ac_prog", so it can be a program name with args.
-set dummy $ac_prog; ac_word=$2
-{ echo "$as_me:$LINENO: checking for $ac_word" >&5
-echo $ECHO_N "checking for $ac_word... $ECHO_C" >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if test -n "$ac_ct_CC"; then
-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_ac_ct_CC="$ac_prog"
-    echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-done
-IFS=$as_save_IFS
-
-fi
-fi
-ac_ct_CC=$ac_cv_prog_ac_ct_CC
-if test -n "$ac_ct_CC"; then
-  { echo "$as_me:$LINENO: result: $ac_ct_CC" >&5
-echo "${ECHO_T}$ac_ct_CC" >&6; }
-else
-  { echo "$as_me:$LINENO: result: no" >&5
-echo "${ECHO_T}no" >&6; }
-fi
-
-
-  test -n "$ac_ct_CC" && break
-done
-
-  if test "x$ac_ct_CC" = x; then
-    CC=""
-  else
-    case $cross_compiling:$ac_tool_warned in
-yes:)
-{ echo "$as_me:$LINENO: WARNING: In the future, Autoconf will not detect cross-tools
-whose name does not start with the host triplet.  If you think this
-configuration is useful to you, please write to autoconf@gnu.org." >&5
-echo "$as_me: WARNING: In the future, Autoconf will not detect cross-tools
-whose name does not start with the host triplet.  If you think this
-configuration is useful to you, please write to autoconf@gnu.org." >&2;}
-ac_tool_warned=yes ;;
-esac
-    CC=$ac_ct_CC
-  fi
-fi
-
-fi
-
-
-test -z "$CC" && { { echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH
-See \`config.log' for more details." >&5
-echo "$as_me: error: no acceptable C compiler found in \$PATH
-See \`config.log' for more details." >&2;}
-   { (exit 1); exit 1; }; }
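# Since every probe above honors a pre-set CC ("Let the user override the
# test"), a specific compiler can be forced from the environment instead of
# relying on the PATH search; a sketch, with an assumed compiler path:
#
#   CC=/usr/local/bin/gcc ./configure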
-
-# Provide some information about the compiler.
-echo "$as_me:$LINENO: checking for C compiler version" >&5
-ac_compiler=`set X $ac_compile; echo $2`
-{ (ac_try="$ac_compiler --version >&5"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compiler --version >&5") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }
-{ (ac_try="$ac_compiler -v >&5"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compiler -v >&5") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }
-{ (ac_try="$ac_compiler -V >&5"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compiler -V >&5") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }
-
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-ac_clean_files_save=$ac_clean_files
-ac_clean_files="$ac_clean_files a.out a.exe b.out"
-# Try to create an executable without -o first, disregard a.out.
-# This helps us diagnose broken compilers and gives a first guess at exeext.
-{ echo "$as_me:$LINENO: checking for C compiler default output file name" >&5
-echo $ECHO_N "checking for C compiler default output file name... $ECHO_C" >&6; }
-ac_link_default=`echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
-#
-# List of possible output files, starting from the most likely.
-# The algorithm is not robust to junk in `.', hence go to wildcards (a.*)
-# only as a last resort.  b.out is created by i960 compilers.
-ac_files='a_out.exe a.exe conftest.exe a.out conftest a.* conftest.* b.out'
-#
-# The IRIX 6 linker writes into existing files which may not be
-# executable, retaining their permissions.  Remove them first so a
-# subsequent execution test works.
-ac_rmfiles=
-for ac_file in $ac_files
-do
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;;
-    * ) ac_rmfiles="$ac_rmfiles $ac_file";;
-  esac
-done
-rm -f $ac_rmfiles
-
-if { (ac_try="$ac_link_default"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_link_default") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; then
-  # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
-# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
-# in a Makefile.  We should not override ac_cv_exeext if it was cached,
-# so that the user can short-circuit this test for compilers unknown to
-# Autoconf.
-for ac_file in $ac_files ''
-do
-  test -f "$ac_file" || continue
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj )
-	;;
-    [ab].out )
-	# We found the default executable, but exeext='' is most
-	# certainly right.
-	break;;
-    *.* )
-        if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
-	then :; else
-	   ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
-	fi
-	# We set ac_cv_exeext here because the later test for it is not
-	# safe: cross compilers may not add the suffix if given an `-o'
-	# argument, so we may need to know it at that point already.
-	# Even if this section looks crufty: it has the advantage of
-	# actually working.
-	break;;
-    * )
-	break;;
-  esac
-done
-test "$ac_cv_exeext" = no && ac_cv_exeext=
-
-else
-  ac_file=''
-fi
-
-{ echo "$as_me:$LINENO: result: $ac_file" >&5
-echo "${ECHO_T}$ac_file" >&6; }
-if test -z "$ac_file"; then
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-{ { echo "$as_me:$LINENO: error: C compiler cannot create executables
-See \`config.log' for more details." >&5
-echo "$as_me: error: C compiler cannot create executables
-See \`config.log' for more details." >&2;}
-   { (exit 77); exit 77; }; }
-fi
-
-ac_exeext=$ac_cv_exeext
-
-# Check that the compiler produces executables we can run.  If not, either
-# the compiler is broken, or we cross compile.
-{ echo "$as_me:$LINENO: checking whether the C compiler works" >&5
-echo $ECHO_N "checking whether the C compiler works... $ECHO_C" >&6; }
-# FIXME: These cross compiler hacks should be removed for Autoconf 3.0
-# If not cross compiling, check that we can run a simple program.
-if test "$cross_compiling" != yes; then
-  if { ac_try='./$ac_file'
-  { (case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_try") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-    cross_compiling=no
-  else
-    if test "$cross_compiling" = maybe; then
-	cross_compiling=yes
-    else
-	{ { echo "$as_me:$LINENO: error: cannot run C compiled programs.
-If you meant to cross compile, use \`--host'.
-See \`config.log' for more details." >&5
-echo "$as_me: error: cannot run C compiled programs.
-If you meant to cross compile, use \`--host'.
-See \`config.log' for more details." >&2;}
-   { (exit 1); exit 1; }; }
-    fi
-  fi
-fi
-{ echo "$as_me:$LINENO: result: yes" >&5
-echo "${ECHO_T}yes" >&6; }
-
-rm -f a.out a.exe conftest$ac_cv_exeext b.out
-ac_clean_files=$ac_clean_files_save
-# Check that the compiler produces executables we can run.  If not, either
-# the compiler is broken, or we cross compile.
-{ echo "$as_me:$LINENO: checking whether we are cross compiling" >&5
-echo $ECHO_N "checking whether we are cross compiling... $ECHO_C" >&6; }
-{ echo "$as_me:$LINENO: result: $cross_compiling" >&5
-echo "${ECHO_T}$cross_compiling" >&6; }
-
-{ echo "$as_me:$LINENO: checking for suffix of executables" >&5
-echo $ECHO_N "checking for suffix of executables... $ECHO_C" >&6; }
-if { (ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; then
-  # If both `conftest.exe' and `conftest' are `present' (well, observable)
-# catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
-# work properly (i.e., refer to `conftest.exe'), while it won't with
-# `rm'.
-for ac_file in conftest.exe conftest conftest.*; do
-  test -f "$ac_file" || continue
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.o | *.obj ) ;;
-    *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
-	  break;;
-    * ) break;;
-  esac
-done
-else
-  { { echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link
-See \`config.log' for more details." >&5
-echo "$as_me: error: cannot compute suffix of executables: cannot compile and link
-See \`config.log' for more details." >&2;}
-   { (exit 1); exit 1; }; }
-fi
-
-rm -f conftest$ac_cv_exeext
-{ echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5
-echo "${ECHO_T}$ac_cv_exeext" >&6; }
-
-rm -f conftest.$ac_ext
-EXEEXT=$ac_cv_exeext
-ac_exeext=$EXEEXT
-{ echo "$as_me:$LINENO: checking for suffix of object files" >&5
-echo $ECHO_N "checking for suffix of object files... $ECHO_C" >&6; }
-if test "${ac_cv_objext+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.o conftest.obj
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; then
-  for ac_file in conftest.o conftest.obj conftest.*; do
-  test -f "$ac_file" || continue;
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf ) ;;
-    *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
-       break;;
-  esac
-done
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-{ { echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile
-See \`config.log' for more details." >&5
-echo "$as_me: error: cannot compute suffix of object files: cannot compile
-See \`config.log' for more details." >&2;}
-   { (exit 1); exit 1; }; }
-fi
-
-rm -f conftest.$ac_cv_objext conftest.$ac_ext
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_objext" >&5
-echo "${ECHO_T}$ac_cv_objext" >&6; }
-OBJEXT=$ac_cv_objext
-ac_objext=$OBJEXT
-{ echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5
-echo $ECHO_N "checking whether we are using the GNU C compiler... $ECHO_C" >&6; }
-if test "${ac_cv_c_compiler_gnu+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-
-int
-main ()
-{
-#ifndef __GNUC__
-       choke me
-#endif
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_compiler_gnu=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_compiler_gnu=no
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-ac_cv_c_compiler_gnu=$ac_compiler_gnu
-
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5
-echo "${ECHO_T}$ac_cv_c_compiler_gnu" >&6; }
-GCC=`test $ac_compiler_gnu = yes && echo yes`
-ac_test_CFLAGS=${CFLAGS+set}
-ac_save_CFLAGS=$CFLAGS
-{ echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5
-echo $ECHO_N "checking whether $CC accepts -g... $ECHO_C" >&6; }
-if test "${ac_cv_prog_cc_g+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  ac_save_c_werror_flag=$ac_c_werror_flag
-   ac_c_werror_flag=yes
-   ac_cv_prog_cc_g=no
-   CFLAGS="-g"
-   cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_cv_prog_cc_g=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	CFLAGS=""
-      cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  :
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_c_werror_flag=$ac_save_c_werror_flag
-	 CFLAGS="-g"
-	 cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_cv_prog_cc_g=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-   ac_c_werror_flag=$ac_save_c_werror_flag
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5
-echo "${ECHO_T}$ac_cv_prog_cc_g" >&6; }
-if test "$ac_test_CFLAGS" = set; then
-  CFLAGS=$ac_save_CFLAGS
-elif test $ac_cv_prog_cc_g = yes; then
-  if test "$GCC" = yes; then
-    CFLAGS="-g -O2"
-  else
-    CFLAGS="-g"
-  fi
-else
-  if test "$GCC" = yes; then
-    CFLAGS="-O2"
-  else
-    CFLAGS=
-  fi
-fi
-{ echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5
-echo $ECHO_N "checking for $CC option to accept ISO C89... $ECHO_C" >&6; }
-if test "${ac_cv_prog_cc_c89+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  ac_cv_prog_cc_c89=no
-ac_save_CC=$CC
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <stdarg.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-/* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
-struct buf { int x; };
-FILE * (*rcsopen) (struct buf *, struct stat *, int);
-static char *e (p, i)
-     char **p;
-     int i;
-{
-  return p[i];
-}
-static char *f (char * (*g) (char **, int), char **p, ...)
-{
-  char *s;
-  va_list v;
-  va_start (v,p);
-  s = g (p, va_arg (v,int));
-  va_end (v);
-  return s;
-}
-
-/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
-   function prototypes and stuff, but not '\xHH' hex character constants.
-   These don't provoke an error unfortunately, instead are silently treated
-   as 'x'.  The following induces an error, until -std is added to get
-   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
-   array size at least.  It's necessary to write '\x00'==0 to get something
-   that's true only with -std.  */
-int osf4_cc_array ['\x00' == 0 ? 1 : -1];
-
-/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
-   inside strings and character constants.  */
-#define FOO(x) 'x'
-int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
-
-int test (int i, double x);
-struct s1 {int (*f) (int a);};
-struct s2 {int (*f) (double a);};
-int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
-int argc;
-char **argv;
-int
-main ()
-{
-return f (e, argv, 0) != argv[0]  ||  f (e, argv, 1) != argv[1];
-  ;
-  return 0;
-}
-_ACEOF
-for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
-	-Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
-do
-  CC="$ac_save_CC $ac_arg"
-  rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_cv_prog_cc_c89=$ac_arg
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-
-fi
-
-rm -f core conftest.err conftest.$ac_objext
-  test "x$ac_cv_prog_cc_c89" != "xno" && break
-done
-rm -f conftest.$ac_ext
-CC=$ac_save_CC
-
-fi
-# AC_CACHE_VAL
-case "x$ac_cv_prog_cc_c89" in
-  x)
-    { echo "$as_me:$LINENO: result: none needed" >&5
-echo "${ECHO_T}none needed" >&6; } ;;
-  xno)
-    { echo "$as_me:$LINENO: result: unsupported" >&5
-echo "${ECHO_T}unsupported" >&6; } ;;
-  *)
-    CC="$CC $ac_cv_prog_cc_c89"
-    { echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5
-echo "${ECHO_T}$ac_cv_prog_cc_c89" >&6; } ;;
-esac
-
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-# Checks for libraries.
-
-# Checks for header files.
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-{ echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5
-echo $ECHO_N "checking how to run the C preprocessor... $ECHO_C" >&6; }
-# On Suns, sometimes $CPP names a directory.
-if test -n "$CPP" && test -d "$CPP"; then
-  CPP=
-fi
-if test -z "$CPP"; then
-  if test "${ac_cv_prog_CPP+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-      # Double quotes because CPP needs to be expanded
-    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
-    do
-      ac_preproc_ok=false
-for ac_c_preproc_warn_flag in '' yes
-do
-  # Use a header file that comes with gcc, so configuring glibc
-  # with a fresh cross-compiler works.
-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-  # <limits.h> exists even on freestanding compilers.
-  # On the NeXT, cc -E runs the code through the compiler's parser,
-  # not just through cpp. "Syntax error" is here to catch this case.
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-		     Syntax error
-_ACEOF
-if { (ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then
-  :
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-  # Broken: fails on valid input.
-continue
-fi
-
-rm -f conftest.err conftest.$ac_ext
-
-  # OK, works on sane cases.  Now check whether nonexistent headers
-  # can be detected and how.
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <ac_nonexistent.h>
-_ACEOF
-if { (ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then
-  # Broken: success on invalid input.
-continue
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-  # Passes both tests.
-ac_preproc_ok=:
-break
-fi
-
-rm -f conftest.err conftest.$ac_ext
-
-done
-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
-if $ac_preproc_ok; then
-  break
-fi
-
-    done
-    ac_cv_prog_CPP=$CPP
-
-fi
-  CPP=$ac_cv_prog_CPP
-else
-  ac_cv_prog_CPP=$CPP
-fi
-{ echo "$as_me:$LINENO: result: $CPP" >&5
-echo "${ECHO_T}$CPP" >&6; }
-ac_preproc_ok=false
-for ac_c_preproc_warn_flag in '' yes
-do
-  # Use a header file that comes with gcc, so configuring glibc
-  # with a fresh cross-compiler works.
-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-  # <limits.h> exists even on freestanding compilers.
-  # On the NeXT, cc -E runs the code through the compiler's parser,
-  # not just through cpp. "Syntax error" is here to catch this case.
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-		     Syntax error
-_ACEOF
-if { (ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then
-  :
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-  # Broken: fails on valid input.
-continue
-fi
-
-rm -f conftest.err conftest.$ac_ext
-
-  # OK, works on sane cases.  Now check whether nonexistent headers
-  # can be detected and how.
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <ac_nonexistent.h>
-_ACEOF
-if { (ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then
-  # Broken: success on invalid input.
-continue
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-  # Passes both tests.
-ac_preproc_ok=:
-break
-fi
-
-rm -f conftest.err conftest.$ac_ext
-
-done
-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
-if $ac_preproc_ok; then
-  :
-else
-  { { echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check
-See \`config.log' for more details." >&5
-echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check
-See \`config.log' for more details." >&2;}
-   { (exit 1); exit 1; }; }
-fi
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-{ echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5
-echo $ECHO_N "checking for grep that handles long lines and -e... $ECHO_C" >&6; }
-if test "${ac_cv_path_GREP+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  # Extract the first word of "grep ggrep" to use in msg output
-if test -z "$GREP"; then
-set dummy grep ggrep; ac_prog_name=$2
-if test "${ac_cv_path_GREP+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  ac_path_GREP_found=false
-# Loop through the user's path and test for each of PROGNAME-LIST
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for ac_prog in grep ggrep; do
-  for ac_exec_ext in '' $ac_executable_extensions; do
-    ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
-    { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
-    # Check for GNU ac_path_GREP and select it if it is found.
-  # Check for GNU $ac_path_GREP
-case `"$ac_path_GREP" --version 2>&1` in
-*GNU*)
-  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
-*)
-  ac_count=0
-  echo $ECHO_N "0123456789$ECHO_C" >"conftest.in"
-  while :
-  do
-    cat "conftest.in" "conftest.in" >"conftest.tmp"
-    mv "conftest.tmp" "conftest.in"
-    cp "conftest.in" "conftest.nl"
-    echo 'GREP' >> "conftest.nl"
-    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
-    ac_count=`expr $ac_count + 1`
-    if test $ac_count -gt ${ac_path_GREP_max-0}; then
-      # Best one so far, save it but keep looking for a better one
-      ac_cv_path_GREP="$ac_path_GREP"
-      ac_path_GREP_max=$ac_count
-    fi
-    # 10*(2^10) chars as input seems more than enough
-    test $ac_count -gt 10 && break
-  done
-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
-esac
-
-
-    $ac_path_GREP_found && break 3
-  done
-done
-
-done
-IFS=$as_save_IFS
-
-
-fi
-
-GREP="$ac_cv_path_GREP"
-if test -z "$GREP"; then
-  { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5
-echo "$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;}
-   { (exit 1); exit 1; }; }
-fi
-
-else
-  ac_cv_path_GREP=$GREP
-fi
-
-
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5
-echo "${ECHO_T}$ac_cv_path_GREP" >&6; }
- GREP="$ac_cv_path_GREP"
-
-
-{ echo "$as_me:$LINENO: checking for egrep" >&5
-echo $ECHO_N "checking for egrep... $ECHO_C" >&6; }
-if test "${ac_cv_path_EGREP+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
-   then ac_cv_path_EGREP="$GREP -E"
-   else
-     # Extract the first word of "egrep" to use in msg output
-if test -z "$EGREP"; then
-set dummy egrep; ac_prog_name=$2
-if test "${ac_cv_path_EGREP+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  ac_path_EGREP_found=false
-# Loop through the user's path and test for each of PROGNAME-LIST
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  for ac_prog in egrep; do
-  for ac_exec_ext in '' $ac_executable_extensions; do
-    ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
-    { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
-    # Check for GNU ac_path_EGREP and select it if it is found.
-  # Check for GNU $ac_path_EGREP
-case `"$ac_path_EGREP" --version 2>&1` in
-*GNU*)
-  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
-*)
-  ac_count=0
-  echo $ECHO_N "0123456789$ECHO_C" >"conftest.in"
-  while :
-  do
-    cat "conftest.in" "conftest.in" >"conftest.tmp"
-    mv "conftest.tmp" "conftest.in"
-    cp "conftest.in" "conftest.nl"
-    echo 'EGREP' >> "conftest.nl"
-    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
-    ac_count=`expr $ac_count + 1`
-    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
-      # Best one so far, save it but keep looking for a better one
-      ac_cv_path_EGREP="$ac_path_EGREP"
-      ac_path_EGREP_max=$ac_count
-    fi
-    # 10*(2^10) chars as input seems more than enough
-    test $ac_count -gt 10 && break
-  done
-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
-esac
-
-
-    $ac_path_EGREP_found && break 3
-  done
-done
-
-done
-IFS=$as_save_IFS
-
-
-fi
-
-EGREP="$ac_cv_path_EGREP"
-if test -z "$EGREP"; then
-  { { echo "$as_me:$LINENO: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5
-echo "$as_me: error: no acceptable $ac_prog_name could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;}
-   { (exit 1); exit 1; }; }
-fi
-
-else
-  ac_cv_path_EGREP=$EGREP
-fi
-
-
-   fi
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5
-echo "${ECHO_T}$ac_cv_path_EGREP" >&6; }
- EGREP="$ac_cv_path_EGREP"
-
-
-{ echo "$as_me:$LINENO: checking for ANSI C header files" >&5
-echo $ECHO_N "checking for ANSI C header files... $ECHO_C" >&6; }
-if test "${ac_cv_header_stdc+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <float.h>
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_cv_header_stdc=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_cv_header_stdc=no
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-if test $ac_cv_header_stdc = yes; then
-  # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <string.h>
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "memchr" >/dev/null 2>&1; then
-  :
-else
-  ac_cv_header_stdc=no
-fi
-rm -f conftest*
-
-fi
-
-if test $ac_cv_header_stdc = yes; then
-  # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <stdlib.h>
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "free" >/dev/null 2>&1; then
-  :
-else
-  ac_cv_header_stdc=no
-fi
-rm -f conftest*
-
-fi
-
-if test $ac_cv_header_stdc = yes; then
-  # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
-  if test "$cross_compiling" = yes; then
-  :
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <ctype.h>
-#include <stdlib.h>
-#if ((' ' & 0x0FF) == 0x020)
-# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
-# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
-#else
-# define ISLOWER(c) \
-		   (('a' <= (c) && (c) <= 'i') \
-		     || ('j' <= (c) && (c) <= 'r') \
-		     || ('s' <= (c) && (c) <= 'z'))
-# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
-#endif
-
-#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
-int
-main ()
-{
-  int i;
-  for (i = 0; i < 256; i++)
-    if (XOR (islower (i), ISLOWER (i))
-	|| toupper (i) != TOUPPER (i))
-      return 2;
-  return 0;
-}
-_ACEOF
-rm -f conftest$ac_exeext
-if { (ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
-  { (case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_try") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  :
-else
-  echo "$as_me: program exited with status $ac_status" >&5
-echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-( exit $ac_status )
-ac_cv_header_stdc=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
-fi
-
-
-fi
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5
-echo "${ECHO_T}$ac_cv_header_stdc" >&6; }
-if test $ac_cv_header_stdc = yes; then
-
-cat >>confdefs.h <<\_ACEOF
-#define STDC_HEADERS 1
-_ACEOF
-
-fi
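# A minimal consumer sketch, assuming the classic guard that a configure-time
# STDC_HEADERS result enables in C sources built against configuration.h:
#
#   #ifdef STDC_HEADERS
#   # include <stdlib.h>
#   # include <string.h>
#   #endif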
-
-# On IRIX 5.3, sys/types and inttypes.h are conflicting.
-
-
-
-
-
-
-
-
-
-for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
-		  inttypes.h stdint.h unistd.h
-do
-as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
-{ echo "$as_me:$LINENO: checking for $ac_header" >&5
-echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-$ac_includes_default
-
-#include <$ac_header>
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  eval "$as_ac_Header=yes"
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	eval "$as_ac_Header=no"
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-ac_res=`eval echo '${'$as_ac_Header'}'`
-	       { echo "$as_me:$LINENO: result: $ac_res" >&5
-echo "${ECHO_T}$ac_res" >&6; }
-if test `eval echo '${'$as_ac_Header'}'` = yes; then
-  cat >>confdefs.h <<_ACEOF
-#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-
-done
-
-
-
-
-
-for ac_header in stdlib.h string.h unistd.h
-do
-as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  { echo "$as_me:$LINENO: checking for $ac_header" >&5
-echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-fi
-ac_res=`eval echo '${'$as_ac_Header'}'`
-	       { echo "$as_me:$LINENO: result: $ac_res" >&5
-echo "${ECHO_T}$ac_res" >&6; }
-else
-  # Is the header compilable?
-{ echo "$as_me:$LINENO: checking $ac_header usability" >&5
-echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6; }
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-$ac_includes_default
-#include <$ac_header>
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_header_compiler=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_header_compiler=no
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
-echo "${ECHO_T}$ac_header_compiler" >&6; }
-
-# Is the header present?
-{ echo "$as_me:$LINENO: checking $ac_header presence" >&5
-echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6; }
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <$ac_header>
-_ACEOF
-if { (ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then
-  ac_header_preproc=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-  ac_header_preproc=no
-fi
-
-rm -f conftest.err conftest.$ac_ext
-{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
-echo "${ECHO_T}$ac_header_preproc" >&6; }
-
-# So?  What about this header?
-case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
-  yes:no: )
-    { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
-echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
-echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
-    ac_header_preproc=yes
-    ;;
-  no:yes:* )
-    { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
-echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header:     check for missing prerequisite headers?" >&5
-echo "$as_me: WARNING: $ac_header:     check for missing prerequisite headers?" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
-echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&5
-echo "$as_me: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
-echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
-echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
-
-    ;;
-esac
-{ echo "$as_me:$LINENO: checking for $ac_header" >&5
-echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  eval "$as_ac_Header=\$ac_header_preproc"
-fi
-ac_res=`eval echo '${'$as_ac_Header'}'`
-	       { echo "$as_me:$LINENO: result: $ac_res" >&5
-echo "${ECHO_T}$ac_res" >&6; }
-
-fi
-if test `eval echo '${'$as_ac_Header'}'` = yes; then
-  cat >>confdefs.h <<_ACEOF
-#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-
-done
-
-
-# Check for HADOOP_CONF_DIR: define it when --with-confdir was given.
-
-
-if test "$with_confdir" != ""
-then
-cat >>confdefs.h <<_ACEOF
-#define HADOOP_CONF_DIR "$with_confdir"
-_ACEOF
-
-fi
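# A sketch of the net effect, assuming configuration.h is generated from the
# ac_config_headers call earlier (path illustrative):
#
#   ./configure --with-confdir=/etc/hadoop
#   grep HADOOP_CONF_DIR configuration.h
#   # -> #define HADOOP_CONF_DIR "/etc/hadoop"
#
# When --with-confdir is omitted, no HADOOP_CONF_DIR line is emitted.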
-# Checks for typedefs, structures, and compiler characteristics.
-{ echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5
-echo $ECHO_N "checking for an ANSI C-conforming const... $ECHO_C" >&6; }
-if test "${ac_cv_c_const+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-
-int
-main ()
-{
-/* FIXME: Include the comments suggested by Paul. */
-#ifndef __cplusplus
-  /* Ultrix mips cc rejects this.  */
-  typedef int charset[2];
-  const charset cs;
-  /* SunOS 4.1.1 cc rejects this.  */
-  char const *const *pcpcc;
-  char **ppc;
-  /* NEC SVR4.0.2 mips cc rejects this.  */
-  struct point {int x, y;};
-  static struct point const zero = {0,0};
-  /* AIX XL C 1.02.0.0 rejects this.
-     It does not let you subtract one const X* pointer from another in
-     an arm of an if-expression whose if-part is not a constant
-     expression */
-  const char *g = "string";
-  pcpcc = &g + (g ? g-g : 0);
-  /* HPUX 7.0 cc rejects these. */
-  ++pcpcc;
-  ppc = (char**) pcpcc;
-  pcpcc = (char const *const *) ppc;
-  { /* SCO 3.2v4 cc rejects this.  */
-    char *t;
-    char const *s = 0 ? (char *) 0 : (char const *) 0;
-
-    *t++ = 0;
-    if (s) return 0;
-  }
-  { /* Someone thinks the Sun supposedly-ANSI compiler will reject this.  */
-    int x[] = {25, 17};
-    const int *foo = &x[0];
-    ++foo;
-  }
-  { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */
-    typedef const int *iptr;
-    iptr p = 0;
-    ++p;
-  }
-  { /* AIX XL C 1.02.0.0 rejects this saying
-       "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
-    struct s { int j; const int *ap[3]; };
-    struct s *b; b->j = 5;
-  }
-  { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
-    const int foo = 10;
-    if (!foo) return 0;
-  }
-  return !cs[0] && !zero.x;
-#endif
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_cv_c_const=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_cv_c_const=no
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_c_const" >&5
-echo "${ECHO_T}$ac_cv_c_const" >&6; }
-if test $ac_cv_c_const = no; then
-
-cat >>confdefs.h <<\_ACEOF
-#define const
-_ACEOF
-
-fi
-
-{ echo "$as_me:$LINENO: checking for pid_t" >&5
-echo $ECHO_N "checking for pid_t... $ECHO_C" >&6; }
-if test "${ac_cv_type_pid_t+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-$ac_includes_default
-typedef pid_t ac__type_new_;
-int
-main ()
-{
-if ((ac__type_new_ *) 0)
-  return 0;
-if (sizeof (ac__type_new_))
-  return 0;
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_cv_type_pid_t=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_cv_type_pid_t=no
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_type_pid_t" >&5
-echo "${ECHO_T}$ac_cv_type_pid_t" >&6; }
-if test $ac_cv_type_pid_t = yes; then
-  :
-else
-
-cat >>confdefs.h <<_ACEOF
-#define pid_t int
-_ACEOF
-
-fi
-
-
-# Checks for library functions.
-
-for ac_header in stdlib.h
-do
-as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  { echo "$as_me:$LINENO: checking for $ac_header" >&5
-echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-fi
-ac_res=`eval echo '${'$as_ac_Header'}'`
-	       { echo "$as_me:$LINENO: result: $ac_res" >&5
-echo "${ECHO_T}$ac_res" >&6; }
-else
-  # Is the header compilable?
-{ echo "$as_me:$LINENO: checking $ac_header usability" >&5
-echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6; }
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-$ac_includes_default
-#include <$ac_header>
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_header_compiler=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_header_compiler=no
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
-echo "${ECHO_T}$ac_header_compiler" >&6; }
-
-# Is the header present?
-{ echo "$as_me:$LINENO: checking $ac_header presence" >&5
-echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6; }
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <$ac_header>
-_ACEOF
-if { (ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then
-  ac_header_preproc=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-  ac_header_preproc=no
-fi
-
-rm -f conftest.err conftest.$ac_ext
-{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
-echo "${ECHO_T}$ac_header_preproc" >&6; }
-
-# So?  What about this header?
-case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
-  yes:no: )
-    { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
-echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
-echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
-    ac_header_preproc=yes
-    ;;
-  no:yes:* )
-    { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
-echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header:     check for missing prerequisite headers?" >&5
-echo "$as_me: WARNING: $ac_header:     check for missing prerequisite headers?" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
-echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&5
-echo "$as_me: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
-echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
-echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
-
-    ;;
-esac
-{ echo "$as_me:$LINENO: checking for $ac_header" >&5
-echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  eval "$as_ac_Header=\$ac_header_preproc"
-fi
-ac_res=`eval echo '${'$as_ac_Header'}'`
-	       { echo "$as_me:$LINENO: result: $ac_res" >&5
-echo "${ECHO_T}$ac_res" >&6; }
-
-fi
-if test `eval echo '${'$as_ac_Header'}'` = yes; then
-  cat >>confdefs.h <<_ACEOF
-#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-
-done
-
-{ echo "$as_me:$LINENO: checking for GNU libc compatible malloc" >&5
-echo $ECHO_N "checking for GNU libc compatible malloc... $ECHO_C" >&6; }
-if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if test "$cross_compiling" = yes; then
-  ac_cv_func_malloc_0_nonnull=no
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#if defined STDC_HEADERS || defined HAVE_STDLIB_H
-# include <stdlib.h>
-#else
-char *malloc ();
-#endif
-
-int
-main ()
-{
-return ! malloc (0);
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest$ac_exeext
-if { (ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
-  { (case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_try") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  ac_cv_func_malloc_0_nonnull=yes
-else
-  echo "$as_me: program exited with status $ac_status" >&5
-echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-( exit $ac_status )
-ac_cv_func_malloc_0_nonnull=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
-fi
-
-
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_func_malloc_0_nonnull" >&5
-echo "${ECHO_T}$ac_cv_func_malloc_0_nonnull" >&6; }
-if test $ac_cv_func_malloc_0_nonnull = yes; then
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_MALLOC 1
-_ACEOF
-
-else
-  cat >>confdefs.h <<\_ACEOF
-#define HAVE_MALLOC 0
-_ACEOF
-
-   case " $LIBOBJS " in
-  *" malloc.$ac_objext "* ) ;;
-  *) LIBOBJS="$LIBOBJS malloc.$ac_objext"
- ;;
-esac
-
-
-cat >>confdefs.h <<\_ACEOF
-#define malloc rpl_malloc
-_ACEOF
-
-fi
-
-
-
-
-for ac_header in stdlib.h
-do
-as_ac_Header=`echo "ac_cv_header_$ac_header" | $as_tr_sh`
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  { echo "$as_me:$LINENO: checking for $ac_header" >&5
-echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-fi
-ac_res=`eval echo '${'$as_ac_Header'}'`
-	       { echo "$as_me:$LINENO: result: $ac_res" >&5
-echo "${ECHO_T}$ac_res" >&6; }
-else
-  # Is the header compilable?
-{ echo "$as_me:$LINENO: checking $ac_header usability" >&5
-echo $ECHO_N "checking $ac_header usability... $ECHO_C" >&6; }
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-$ac_includes_default
-#include <$ac_header>
-_ACEOF
-rm -f conftest.$ac_objext
-if { (ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_compile") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then
-  ac_header_compiler=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_header_compiler=no
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ echo "$as_me:$LINENO: result: $ac_header_compiler" >&5
-echo "${ECHO_T}$ac_header_compiler" >&6; }
-
-# Is the header present?
-{ echo "$as_me:$LINENO: checking $ac_header presence" >&5
-echo $ECHO_N "checking $ac_header presence... $ECHO_C" >&6; }
-cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#include <$ac_header>
-_ACEOF
-if { (ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then
-  ac_header_preproc=yes
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-  ac_header_preproc=no
-fi
-
-rm -f conftest.err conftest.$ac_ext
-{ echo "$as_me:$LINENO: result: $ac_header_preproc" >&5
-echo "${ECHO_T}$ac_header_preproc" >&6; }
-
-# So?  What about this header?
-case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in
-  yes:no: )
-    { echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5
-echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5
-echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;}
-    ac_header_preproc=yes
-    ;;
-  no:yes:* )
-    { echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5
-echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header:     check for missing prerequisite headers?" >&5
-echo "$as_me: WARNING: $ac_header:     check for missing prerequisite headers?" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5
-echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&5
-echo "$as_me: WARNING: $ac_header:     section \"Present But Cannot Be Compiled\"" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5
-echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;}
-    { echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5
-echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;}
-
-    ;;
-esac
-{ echo "$as_me:$LINENO: checking for $ac_header" >&5
-echo $ECHO_N "checking for $ac_header... $ECHO_C" >&6; }
-if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  eval "$as_ac_Header=\$ac_header_preproc"
-fi
-ac_res=`eval echo '${'$as_ac_Header'}'`
-	       { echo "$as_me:$LINENO: result: $ac_res" >&5
-echo "${ECHO_T}$ac_res" >&6; }
-
-fi
-if test `eval echo '${'$as_ac_Header'}'` = yes; then
-  cat >>confdefs.h <<_ACEOF
-#define `echo "HAVE_$ac_header" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-
-done
-
-{ echo "$as_me:$LINENO: checking for GNU libc compatible realloc" >&5
-echo $ECHO_N "checking for GNU libc compatible realloc... $ECHO_C" >&6; }
-if test "${ac_cv_func_realloc_0_nonnull+set}" = set; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  if test "$cross_compiling" = yes; then
-  ac_cv_func_realloc_0_nonnull=no
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-#if defined STDC_HEADERS || defined HAVE_STDLIB_H
-# include <stdlib.h>
-#else
-char *realloc ();
-#endif
-
-int
-main ()
-{
-return ! realloc (0, 0);
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest$ac_exeext
-if { (ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && { ac_try='./conftest$ac_exeext'
-  { (case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_try") 2>&5
-  ac_status=$?
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); }; }; then
-  ac_cv_func_realloc_0_nonnull=yes
-else
-  echo "$as_me: program exited with status $ac_status" >&5
-echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-( exit $ac_status )
-ac_cv_func_realloc_0_nonnull=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext
-fi
-
-
-fi
-{ echo "$as_me:$LINENO: result: $ac_cv_func_realloc_0_nonnull" >&5
-echo "${ECHO_T}$ac_cv_func_realloc_0_nonnull" >&6; }
-if test $ac_cv_func_realloc_0_nonnull = yes; then
-
-cat >>confdefs.h <<\_ACEOF
-#define HAVE_REALLOC 1
-_ACEOF
-
-else
-  cat >>confdefs.h <<\_ACEOF
-#define HAVE_REALLOC 0
-_ACEOF
-
-   case " $LIBOBJS " in
-  *" realloc.$ac_objext "* ) ;;
-  *) LIBOBJS="$LIBOBJS realloc.$ac_objext"
- ;;
-esac
-
-
-cat >>confdefs.h <<\_ACEOF
-#define realloc rpl_realloc
-_ACEOF
-
-fi
-
-
-
-
-for ac_func in strerror
-do
-as_ac_var=`echo "ac_cv_func_$ac_func" | $as_tr_sh`
-{ echo "$as_me:$LINENO: checking for $ac_func" >&5
-echo $ECHO_N "checking for $ac_func... $ECHO_C" >&6; }
-if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then
-  echo $ECHO_N "(cached) $ECHO_C" >&6
-else
-  cat >conftest.$ac_ext <<_ACEOF
-/* confdefs.h.  */
-_ACEOF
-cat confdefs.h >>conftest.$ac_ext
-cat >>conftest.$ac_ext <<_ACEOF
-/* end confdefs.h.  */
-/* Define $ac_func to an innocuous variant, in case <limits.h> declares $ac_func.
-   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
-#define $ac_func innocuous_$ac_func
-
-/* System header to define __stub macros and hopefully few prototypes,
-    which can conflict with char $ac_func (); below.
-    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-    <limits.h> exists even on freestanding compilers.  */
-
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-
-#undef $ac_func
-
-/* Override any GCC internal prototype to avoid an error.
-   Use char because int might match the return type of a GCC
-   builtin and then its argument prototype would still apply.  */
-#ifdef __cplusplus
-extern "C"
-#endif
-char $ac_func ();
-/* The GNU C library defines this for functions which it implements
-    to always fail with ENOSYS.  Some functions are actually named
-    something starting with __ and the normal name is an alias.  */
-#if defined __stub_$ac_func || defined __stub___$ac_func
-choke me
-#endif
-
-int
-main ()
-{
-return $ac_func ();
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.$ac_objext conftest$ac_exeext
-if { (ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
-  (eval "$ac_link") 2>conftest.er1
-  ac_status=$?
-  grep -v '^ *+' conftest.er1 >conftest.err
-  rm -f conftest.er1
-  cat conftest.err >&5
-  echo "$as_me:$LINENO: \$? = $ac_status" >&5
-  (exit $ac_status); } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest$ac_exeext &&
-       $as_test_x conftest$ac_exeext; then
-  eval "$as_ac_var=yes"
-else
-  echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	eval "$as_ac_var=no"
-fi
-
-rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \
-      conftest$ac_exeext conftest.$ac_ext
-fi
-ac_res=`eval echo '${'$as_ac_var'}'`
-	       { echo "$as_me:$LINENO: result: $ac_res" >&5
-echo "${ECHO_T}$ac_res" >&6; }
-if test `eval echo '${'$as_ac_var'}'` = yes; then
-  cat >>confdefs.h <<_ACEOF
-#define `echo "HAVE_$ac_func" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-done
-
-
-ac_config_files="$ac_config_files Makefile"
-
-cat >confcache <<\_ACEOF
-# This file is a shell script that caches the results of configure
-# tests run on this system so they can be shared between configure
-# scripts and configure runs, see configure's option --config-cache.
-# It is not useful on other systems.  If it contains results you don't
-# want to keep, you may remove or edit it.
-#
-# config.status only pays attention to the cache file if you give it
-# the --recheck option to rerun configure.
-#
-# `ac_cv_env_foo' variables (set or unset) will be overridden when
-# loading this file, other *unset* `ac_cv_foo' will be assigned the
-# following values.
-
-_ACEOF
-
-# The following way of writing the cache mishandles newlines in values,
-# but we know of no workaround that is simple, portable, and efficient.
-# So, we kill variables containing newlines.
-# Ultrix sh set writes to stderr and can't be redirected directly,
-# and sets the high bit in the cache file unless we assign to the vars.
-(
-  for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
-    eval ac_val=\$$ac_var
-    case $ac_val in #(
-    *${as_nl}*)
-      case $ac_var in #(
-      *_cv_*) { echo "$as_me:$LINENO: WARNING: Cache variable $ac_var contains a newline." >&5
-echo "$as_me: WARNING: Cache variable $ac_var contains a newline." >&2;} ;;
-      esac
-      case $ac_var in #(
-      _ | IFS | as_nl) ;; #(
-      *) $as_unset $ac_var ;;
-      esac ;;
-    esac
-  done
-
-  (set) 2>&1 |
-    case $as_nl`(ac_space=' '; set) 2>&1` in #(
-    *${as_nl}ac_space=\ *)
-      # `set' does not quote correctly, so add quotes (double-quote
-      # substitution turns \\\\ into \\, and sed turns \\ into \).
-      sed -n \
-	"s/'/'\\\\''/g;
-	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
-      ;; #(
-    *)
-      # `set' quotes correctly as required by POSIX, so do not add quotes.
-      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
-      ;;
-    esac |
-    sort
-) |
-  sed '
-     /^ac_cv_env_/b end
-     t clear
-     :clear
-     s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
-     t end
-     s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
-     :end' >>confcache
-if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
-  if test -w "$cache_file"; then
-    test "x$cache_file" != "x/dev/null" &&
-      { echo "$as_me:$LINENO: updating cache $cache_file" >&5
-echo "$as_me: updating cache $cache_file" >&6;}
-    cat confcache >$cache_file
-  else
-    { echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5
-echo "$as_me: not updating unwritable cache $cache_file" >&6;}
-  fi
-fi
-rm -f confcache
-
-test "x$prefix" = xNONE && prefix=$ac_default_prefix
-# Let make expand exec_prefix.
-test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
-
-DEFS=-DHAVE_CONFIG_H
-
-ac_libobjs=
-ac_ltlibobjs=
-for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
-  # 1. Remove the extension, and $U if already installed.
-  ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
-  ac_i=`echo "$ac_i" | sed "$ac_script"`
-  # 2. Prepend LIBOBJDIR.  When used with automake>=1.10 LIBOBJDIR
-  #    will be set to the directory where LIBOBJS objects are built.
-  ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext"
-  ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo'
-done
-LIBOBJS=$ac_libobjs
-
-LTLIBOBJS=$ac_ltlibobjs
-
-
-
-: ${CONFIG_STATUS=./config.status}
-ac_clean_files_save=$ac_clean_files
-ac_clean_files="$ac_clean_files $CONFIG_STATUS"
-{ echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5
-echo "$as_me: creating $CONFIG_STATUS" >&6;}
-cat >$CONFIG_STATUS <<_ACEOF
-#! $SHELL
-# Generated by $as_me.
-# Run this file to recreate the current configuration.
-# Compiler output produced by configure, useful for debugging
-# configure, is in config.log if it exists.
-
-debug=false
-ac_cs_recheck=false
-ac_cs_silent=false
-SHELL=\${CONFIG_SHELL-$SHELL}
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF
-## --------------------- ##
-## M4sh Initialization.  ##
-## --------------------- ##
-
-# Be more Bourne compatible
-DUALCASE=1; export DUALCASE # for MKS sh
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then
-  emulate sh
-  NULLCMD=:
-  # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '${1+"$@"}'='"$@"'
-  setopt NO_GLOB_SUBST
-else
-  case `(set -o) 2>/dev/null` in
-  *posix*) set -o posix ;;
-esac
-
-fi
-
-
-
-
-# PATH needs CR
-# Avoid depending upon Character Ranges.
-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
-as_cr_digits='0123456789'
-as_cr_alnum=$as_cr_Letters$as_cr_digits
-
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
-  echo "#! /bin/sh" >conf$$.sh
-  echo  "exit 0"   >>conf$$.sh
-  chmod +x conf$$.sh
-  if (PATH="/nonexistent;."; conf$$.sh) >/dev/null 2>&1; then
-    PATH_SEPARATOR=';'
-  else
-    PATH_SEPARATOR=:
-  fi
-  rm -f conf$$.sh
-fi
-
-# Support unset when possible.
-if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
-  as_unset=unset
-else
-  as_unset=false
-fi
-
-
-# IFS
-# We need space, tab and new line, in precisely that order.  Quoting is
-# there to prevent editors from complaining about space-tab.
-# (If _AS_PATH_WALK were called with IFS unset, it would disable word
-# splitting by setting IFS to empty value.)
-as_nl='
-'
-IFS=" ""	$as_nl"
-
-# Find who we are.  Look in the path if we contain no directory separator.
-case $0 in
-  *[\\/]* ) as_myself=$0 ;;
-  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-done
-IFS=$as_save_IFS
-
-     ;;
-esac
-# We did not find ourselves, most probably we were run as `sh COMMAND'
-# in which case we are not to be found in the path.
-if test "x$as_myself" = x; then
-  as_myself=$0
-fi
-if test ! -f "$as_myself"; then
-  echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
-  { (exit 1); exit 1; }
-fi
-
-# Work around bugs in pre-3.0 UWIN ksh.
-for as_var in ENV MAIL MAILPATH
-do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
-done
-PS1='$ '
-PS2='> '
-PS4='+ '
-
-# NLS nuisances.
-for as_var in \
-  LANG LANGUAGE LC_ADDRESS LC_ALL LC_COLLATE LC_CTYPE LC_IDENTIFICATION \
-  LC_MEASUREMENT LC_MESSAGES LC_MONETARY LC_NAME LC_NUMERIC LC_PAPER \
-  LC_TELEPHONE LC_TIME
-do
-  if (set +x; test -z "`(eval $as_var=C; export $as_var) 2>&1`"); then
-    eval $as_var=C; export $as_var
-  else
-    ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var
-  fi
-done
-
-# Required to use basename.
-if expr a : '\(a\)' >/dev/null 2>&1 &&
-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
-  as_expr=expr
-else
-  as_expr=false
-fi
-
-if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
-  as_basename=basename
-else
-  as_basename=false
-fi
-
-
-# Name of the executable.
-as_me=`$as_basename -- "$0" ||
-$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
-	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-echo X/"$0" |
-    sed '/^.*\/\([^/][^/]*\)\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-
-# CDPATH.
-$as_unset CDPATH
-
-
-
-  as_lineno_1=$LINENO
-  as_lineno_2=$LINENO
-  test "x$as_lineno_1" != "x$as_lineno_2" &&
-  test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || {
-
-  # Create $as_me.lineno as a copy of $as_myself, but with $LINENO
-  # uniformly replaced by the line number.  The first 'sed' inserts a
-  # line-number line after each line using $LINENO; the second 'sed'
-  # does the real work.  The second script uses 'N' to pair each
-  # line-number line with the line containing $LINENO, and appends
-  # trailing '-' during substitution so that $LINENO is not a special
-  # case at line end.
-  # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the
-  # scripts with optimization help from Paolo Bonzini.  Blame Lee
-  # E. McMahon (1931-1989) for sed's syntax.  :-)
-  sed -n '
-    p
-    /[$]LINENO/=
-  ' <$as_myself |
-    sed '
-      s/[$]LINENO.*/&-/
-      t lineno
-      b
-      :lineno
-      N
-      :loop
-      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
-      t loop
-      s/-\n.*//
-    ' >$as_me.lineno &&
-  chmod +x "$as_me.lineno" ||
-    { echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2
-   { (exit 1); exit 1; }; }
-
-  # Don't try to exec as it changes $[0], causing all sort of problems
-  # (the dirname of $[0] is not the place where we might find the
-  # original and so on.  Autoconf is especially sensitive to this).
-  . "./$as_me.lineno"
-  # Exit status is that of the last command.
-  exit
-}
-
-
-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
-  as_dirname=dirname
-else
-  as_dirname=false
-fi
-
-ECHO_C= ECHO_N= ECHO_T=
-case `echo -n x` in
--n*)
-  case `echo 'x\c'` in
-  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
-  *)   ECHO_C='\c';;
-  esac;;
-*)
-  ECHO_N='-n';;
-esac
-
-if expr a : '\(a\)' >/dev/null 2>&1 &&
-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
-  as_expr=expr
-else
-  as_expr=false
-fi
-
-rm -f conf$$ conf$$.exe conf$$.file
-if test -d conf$$.dir; then
-  rm -f conf$$.dir/conf$$.file
-else
-  rm -f conf$$.dir
-  mkdir conf$$.dir
-fi
-echo >conf$$.file
-if ln -s conf$$.file conf$$ 2>/dev/null; then
-  as_ln_s='ln -s'
-  # ... but there are two gotchas:
-  # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
-  # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
-  # In both cases, we have to default to `cp -p'.
-  ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
-    as_ln_s='cp -p'
-elif ln conf$$.file conf$$ 2>/dev/null; then
-  as_ln_s=ln
-else
-  as_ln_s='cp -p'
-fi
-rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
-rmdir conf$$.dir 2>/dev/null
-
-if mkdir -p . 2>/dev/null; then
-  as_mkdir_p=:
-else
-  test -d ./-p && rmdir ./-p
-  as_mkdir_p=false
-fi
-
-if test -x / >/dev/null 2>&1; then
-  as_test_x='test -x'
-else
-  if ls -dL / >/dev/null 2>&1; then
-    as_ls_L_option=L
-  else
-    as_ls_L_option=
-  fi
-  as_test_x='
-    eval sh -c '\''
-      if test -d "$1"; then
-        test -d "$1/.";
-      else
-	case $1 in
-        -*)set "./$1";;
-	esac;
-	case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in
-	???[sx]*):;;*)false;;esac;fi
-    '\'' sh
-  '
-fi
-as_executable_p=$as_test_x
-
-# Sed expression to map a string onto a valid CPP name.
-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
-
-# Sed expression to map a string onto a valid variable name.
-as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
-
-
-exec 6>&1
-
-# Save the log message, to keep $[0] and so on meaningful, and to
-# report actual input values of CONFIG_FILES etc. instead of their
-# values after options handling.
-ac_log="
-This file was extended by task-controller $as_me 0.1, which was
-generated by GNU Autoconf 2.61.  Invocation command line was
-
-  CONFIG_FILES    = $CONFIG_FILES
-  CONFIG_HEADERS  = $CONFIG_HEADERS
-  CONFIG_LINKS    = $CONFIG_LINKS
-  CONFIG_COMMANDS = $CONFIG_COMMANDS
-  $ $0 $@
-
-on `(hostname || uname -n) 2>/dev/null | sed 1q`
-"
-
-_ACEOF
-
-cat >>$CONFIG_STATUS <<_ACEOF
-# Files that config.status was made for.
-config_files="$ac_config_files"
-config_headers="$ac_config_headers"
-
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF
-ac_cs_usage="\
-\`$as_me' instantiates files from templates according to the
-current configuration.
-
-Usage: $0 [OPTIONS] [FILE]...
-
-  -h, --help       print this help, then exit
-  -V, --version    print version number and configuration settings, then exit
-  -q, --quiet      do not print progress messages
-  -d, --debug      don't remove temporary files
-      --recheck    update $as_me by reconfiguring in the same conditions
-  --file=FILE[:TEMPLATE]
-		   instantiate the configuration file FILE
-  --header=FILE[:TEMPLATE]
-		   instantiate the configuration header FILE
-
-Configuration files:
-$config_files
-
-Configuration headers:
-$config_headers
-
-Report bugs to <bug-autoconf@gnu.org>."
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF
-ac_cs_version="\\
-task-controller config.status 0.1
-configured by $0, generated by GNU Autoconf 2.61,
-  with options \\"`echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\"
-
-Copyright (C) 2006 Free Software Foundation, Inc.
-This config.status script is free software; the Free Software Foundation
-gives unlimited permission to copy, distribute and modify it."
-
-ac_pwd='$ac_pwd'
-srcdir='$srcdir'
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF
-# If no file are specified by the user, then we need to provide default
-# value.  By we need to know if files were specified by the user.
-ac_need_defaults=:
-while test $# != 0
-do
-  case $1 in
-  --*=*)
-    ac_option=`expr "X$1" : 'X\([^=]*\)='`
-    ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
-    ac_shift=:
-    ;;
-  *)
-    ac_option=$1
-    ac_optarg=$2
-    ac_shift=shift
-    ;;
-  esac
-
-  case $ac_option in
-  # Handling of the options.
-  -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
-    ac_cs_recheck=: ;;
-  --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
-    echo "$ac_cs_version"; exit ;;
-  --debug | --debu | --deb | --de | --d | -d )
-    debug=: ;;
-  --file | --fil | --fi | --f )
-    $ac_shift
-    CONFIG_FILES="$CONFIG_FILES $ac_optarg"
-    ac_need_defaults=false;;
-  --header | --heade | --head | --hea )
-    $ac_shift
-    CONFIG_HEADERS="$CONFIG_HEADERS $ac_optarg"
-    ac_need_defaults=false;;
-  --he | --h)
-    # Conflict between --help and --header
-    { echo "$as_me: error: ambiguous option: $1
-Try \`$0 --help' for more information." >&2
-   { (exit 1); exit 1; }; };;
-  --help | --hel | -h )
-    echo "$ac_cs_usage"; exit ;;
-  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-  | -silent | --silent | --silen | --sile | --sil | --si | --s)
-    ac_cs_silent=: ;;
-
-  # This is an error.
-  -*) { echo "$as_me: error: unrecognized option: $1
-Try \`$0 --help' for more information." >&2
-   { (exit 1); exit 1; }; } ;;
-
-  *) ac_config_targets="$ac_config_targets $1"
-     ac_need_defaults=false ;;
-
-  esac
-  shift
-done
-
-ac_configure_extra_args=
-
-if $ac_cs_silent; then
-  exec 6>/dev/null
-  ac_configure_extra_args="$ac_configure_extra_args --silent"
-fi
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF
-if \$ac_cs_recheck; then
-  echo "running CONFIG_SHELL=$SHELL $SHELL $0 "$ac_configure_args \$ac_configure_extra_args " --no-create --no-recursion" >&6
-  CONFIG_SHELL=$SHELL
-  export CONFIG_SHELL
-  exec $SHELL "$0"$ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
-fi
-
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
-exec 5>>config.log
-{
-  echo
-  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
-## Running $as_me. ##
-_ASBOX
-  echo "$ac_log"
-} >&5
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF
-
-# Handling of arguments.
-for ac_config_target in $ac_config_targets
-do
-  case $ac_config_target in
-    "configuration.h") CONFIG_HEADERS="$CONFIG_HEADERS configuration.h" ;;
-    "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
-
-  *) { { echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5
-echo "$as_me: error: invalid argument: $ac_config_target" >&2;}
-   { (exit 1); exit 1; }; };;
-  esac
-done
-
-
-# If the user did not use the arguments to specify the items to instantiate,
-# then the envvar interface is used.  Set only those that are not.
-# We use the long form for the default assignment because of an extremely
-# bizarre bug on SunOS 4.1.3.
-if $ac_need_defaults; then
-  test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
-  test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
-fi
-
-# Have a temporary directory for convenience.  Make it in the build tree
-# simply because there is no reason against having it here, and in addition,
-# creating and moving files from /tmp can sometimes cause problems.
-# Hook for its removal unless debugging.
-# Note that there is a small window in which the directory will not be cleaned:
-# after its creation but before its name has been assigned to `$tmp'.
-$debug ||
-{
-  tmp=
-  trap 'exit_status=$?
-  { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
-' 0
-  trap '{ (exit 1); exit 1; }' 1 2 13 15
-}
-# Create a (secure) tmp directory for tmp files.
-
-{
-  tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
-  test -n "$tmp" && test -d "$tmp"
-}  ||
-{
-  tmp=./conf$$-$RANDOM
-  (umask 077 && mkdir "$tmp")
-} ||
-{
-   echo "$me: cannot create a temporary directory in ." >&2
-   { (exit 1); exit 1; }
-}
-
-#
-# Set up the sed scripts for CONFIG_FILES section.
-#
-
-# No need to generate the scripts if there are no CONFIG_FILES.
-# This happens for instance when ./config.status config.h
-if test -n "$CONFIG_FILES"; then
-
-_ACEOF
-
-
-
-ac_delim='%!_!# '
-for ac_last_try in false false false false false :; do
-  cat >conf$$subs.sed <<_ACEOF
-SHELL!$SHELL$ac_delim
-PATH_SEPARATOR!$PATH_SEPARATOR$ac_delim
-PACKAGE_NAME!$PACKAGE_NAME$ac_delim
-PACKAGE_TARNAME!$PACKAGE_TARNAME$ac_delim
-PACKAGE_VERSION!$PACKAGE_VERSION$ac_delim
-PACKAGE_STRING!$PACKAGE_STRING$ac_delim
-PACKAGE_BUGREPORT!$PACKAGE_BUGREPORT$ac_delim
-exec_prefix!$exec_prefix$ac_delim
-prefix!$prefix$ac_delim
-program_transform_name!$program_transform_name$ac_delim
-bindir!$bindir$ac_delim
-sbindir!$sbindir$ac_delim
-libexecdir!$libexecdir$ac_delim
-datarootdir!$datarootdir$ac_delim
-datadir!$datadir$ac_delim
-sysconfdir!$sysconfdir$ac_delim
-sharedstatedir!$sharedstatedir$ac_delim
-localstatedir!$localstatedir$ac_delim
-includedir!$includedir$ac_delim
-oldincludedir!$oldincludedir$ac_delim
-docdir!$docdir$ac_delim
-infodir!$infodir$ac_delim
-htmldir!$htmldir$ac_delim
-dvidir!$dvidir$ac_delim
-pdfdir!$pdfdir$ac_delim
-psdir!$psdir$ac_delim
-libdir!$libdir$ac_delim
-localedir!$localedir$ac_delim
-mandir!$mandir$ac_delim
-DEFS!$DEFS$ac_delim
-ECHO_C!$ECHO_C$ac_delim
-ECHO_N!$ECHO_N$ac_delim
-ECHO_T!$ECHO_T$ac_delim
-LIBS!$LIBS$ac_delim
-build_alias!$build_alias$ac_delim
-host_alias!$host_alias$ac_delim
-target_alias!$target_alias$ac_delim
-CC!$CC$ac_delim
-CFLAGS!$CFLAGS$ac_delim
-LDFLAGS!$LDFLAGS$ac_delim
-CPPFLAGS!$CPPFLAGS$ac_delim
-ac_ct_CC!$ac_ct_CC$ac_delim
-EXEEXT!$EXEEXT$ac_delim
-OBJEXT!$OBJEXT$ac_delim
-CPP!$CPP$ac_delim
-GREP!$GREP$ac_delim
-EGREP!$EGREP$ac_delim
-LIBOBJS!$LIBOBJS$ac_delim
-LTLIBOBJS!$LTLIBOBJS$ac_delim
-_ACEOF
-
-  if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 49; then
-    break
-  elif $ac_last_try; then
-    { { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5
-echo "$as_me: error: could not make $CONFIG_STATUS" >&2;}
-   { (exit 1); exit 1; }; }
-  else
-    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
-  fi
-done
-
-ac_eof=`sed -n '/^CEOF[0-9]*$/s/CEOF/0/p' conf$$subs.sed`
-if test -n "$ac_eof"; then
-  ac_eof=`echo "$ac_eof" | sort -nru | sed 1q`
-  ac_eof=`expr $ac_eof + 1`
-fi
-
-cat >>$CONFIG_STATUS <<_ACEOF
-cat >"\$tmp/subs-1.sed" <<\CEOF$ac_eof
-/@[a-zA-Z_][a-zA-Z_0-9]*@/!b end
-_ACEOF
-sed '
-s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g
-s/^/s,@/; s/!/@,|#_!!_#|/
-:n
-t n
-s/'"$ac_delim"'$/,g/; t
-s/$/\\/; p
-N; s/^.*\n//; s/[,\\&]/\\&/g; s/@/@|#_!!_#|/g; b n
-' >>$CONFIG_STATUS <conf$$subs.sed
-rm -f conf$$subs.sed
-cat >>$CONFIG_STATUS <<_ACEOF
-:end
-s/|#_!!_#|//g
-CEOF$ac_eof
-_ACEOF
-
-
-# VPATH may cause trouble with some makes, so we remove $(srcdir),
-# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
-# trailing colons and then remove the whole line if VPATH becomes empty
-# (actually we leave an empty line to preserve line numbers).
-if test "x$srcdir" = x.; then
-  ac_vpsub='/^[	 ]*VPATH[	 ]*=/{
-s/:*\$(srcdir):*/:/
-s/:*\${srcdir}:*/:/
-s/:*@srcdir@:*/:/
-s/^\([^=]*=[	 ]*\):*/\1/
-s/:*$//
-s/^[^=]*=[	 ]*$//
-}'
-fi
-
-cat >>$CONFIG_STATUS <<\_ACEOF
-fi # test -n "$CONFIG_FILES"
-
-
-for ac_tag in  :F $CONFIG_FILES  :H $CONFIG_HEADERS
-do
-  case $ac_tag in
-  :[FHLC]) ac_mode=$ac_tag; continue;;
-  esac
-  case $ac_mode$ac_tag in
-  :[FHL]*:*);;
-  :L* | :C*:*) { { echo "$as_me:$LINENO: error: Invalid tag $ac_tag." >&5
-echo "$as_me: error: Invalid tag $ac_tag." >&2;}
-   { (exit 1); exit 1; }; };;
-  :[FH]-) ac_tag=-:-;;
-  :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
-  esac
-  ac_save_IFS=$IFS
-  IFS=:
-  set x $ac_tag
-  IFS=$ac_save_IFS
-  shift
-  ac_file=$1
-  shift
-
-  case $ac_mode in
-  :L) ac_source=$1;;
-  :[FH])
-    ac_file_inputs=
-    for ac_f
-    do
-      case $ac_f in
-      -) ac_f="$tmp/stdin";;
-      *) # Look for the file first in the build tree, then in the source tree
-	 # (if the path is not absolute).  The absolute path cannot be DOS-style,
-	 # because $ac_f cannot contain `:'.
-	 test -f "$ac_f" ||
-	   case $ac_f in
-	   [\\/$]*) false;;
-	   *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
-	   esac ||
-	   { { echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5
-echo "$as_me: error: cannot find input file: $ac_f" >&2;}
-   { (exit 1); exit 1; }; };;
-      esac
-      ac_file_inputs="$ac_file_inputs $ac_f"
-    done
-
-    # Let's still pretend it is `configure' which instantiates (i.e., don't
-    # use $as_me), people would be surprised to read:
-    #    /* config.h.  Generated by config.status.  */
-    configure_input="Generated from "`IFS=:
-	  echo $* | sed 's|^[^:]*/||;s|:[^:]*/|, |g'`" by configure."
-    if test x"$ac_file" != x-; then
-      configure_input="$ac_file.  $configure_input"
-      { echo "$as_me:$LINENO: creating $ac_file" >&5
-echo "$as_me: creating $ac_file" >&6;}
-    fi
-
-    case $ac_tag in
-    *:-:* | *:-) cat >"$tmp/stdin";;
-    esac
-    ;;
-  esac
-
-  ac_dir=`$as_dirname -- "$ac_file" ||
-$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$ac_file" : 'X\(//\)[^/]' \| \
-	 X"$ac_file" : 'X\(//\)$' \| \
-	 X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
-echo X"$ac_file" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-  { as_dir="$ac_dir"
-  case $as_dir in #(
-  -*) as_dir=./$as_dir;;
-  esac
-  test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || {
-    as_dirs=
-    while :; do
-      case $as_dir in #(
-      *\'*) as_qdir=`echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #(
-      *) as_qdir=$as_dir;;
-      esac
-      as_dirs="'$as_qdir' $as_dirs"
-      as_dir=`$as_dirname -- "$as_dir" ||
-$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$as_dir" : 'X\(//\)[^/]' \| \
-	 X"$as_dir" : 'X\(//\)$' \| \
-	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
-echo X"$as_dir" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-      test -d "$as_dir" && break
-    done
-    test -z "$as_dirs" || eval "mkdir $as_dirs"
-  } || test -d "$as_dir" || { { echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5
-echo "$as_me: error: cannot create directory $as_dir" >&2;}
-   { (exit 1); exit 1; }; }; }
-  ac_builddir=.
-
-case "$ac_dir" in
-.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
-*)
-  ac_dir_suffix=/`echo "$ac_dir" | sed 's,^\.[\\/],,'`
-  # A ".." for each directory in $ac_dir_suffix.
-  ac_top_builddir_sub=`echo "$ac_dir_suffix" | sed 's,/[^\\/]*,/..,g;s,/,,'`
-  case $ac_top_builddir_sub in
-  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
-  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
-  esac ;;
-esac
-ac_abs_top_builddir=$ac_pwd
-ac_abs_builddir=$ac_pwd$ac_dir_suffix
-# for backward compatibility:
-ac_top_builddir=$ac_top_build_prefix
-
-case $srcdir in
-  .)  # We are building in place.
-    ac_srcdir=.
-    ac_top_srcdir=$ac_top_builddir_sub
-    ac_abs_top_srcdir=$ac_pwd ;;
-  [\\/]* | ?:[\\/]* )  # Absolute name.
-    ac_srcdir=$srcdir$ac_dir_suffix;
-    ac_top_srcdir=$srcdir
-    ac_abs_top_srcdir=$srcdir ;;
-  *) # Relative name.
-    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
-    ac_top_srcdir=$ac_top_build_prefix$srcdir
-    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
-esac
-ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
-
-
-  case $ac_mode in
-  :F)
-  #
-  # CONFIG_FILE
-  #
-
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF
-# If the template does not know about datarootdir, expand it.
-# FIXME: This hack should be removed a few years after 2.60.
-ac_datarootdir_hack=; ac_datarootdir_seen=
-
-case `sed -n '/datarootdir/ {
-  p
-  q
-}
-/@datadir@/p
-/@docdir@/p
-/@infodir@/p
-/@localedir@/p
-/@mandir@/p
-' $ac_file_inputs` in
-*datarootdir*) ac_datarootdir_seen=yes;;
-*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
-  { echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
-echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF
-  ac_datarootdir_hack='
-  s&@datadir@&$datadir&g
-  s&@docdir@&$docdir&g
-  s&@infodir@&$infodir&g
-  s&@localedir@&$localedir&g
-  s&@mandir@&$mandir&g
-    s&\\\${datarootdir}&$datarootdir&g' ;;
-esac
-_ACEOF
-
-# Neutralize VPATH when `$srcdir' = `.'.
-# Shell code in configure.ac might set extrasub.
-# FIXME: do we really want to maintain this feature?
-cat >>$CONFIG_STATUS <<_ACEOF
-  sed "$ac_vpsub
-$extrasub
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF
-:t
-/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
-s&@configure_input@&$configure_input&;t t
-s&@top_builddir@&$ac_top_builddir_sub&;t t
-s&@srcdir@&$ac_srcdir&;t t
-s&@abs_srcdir@&$ac_abs_srcdir&;t t
-s&@top_srcdir@&$ac_top_srcdir&;t t
-s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
-s&@builddir@&$ac_builddir&;t t
-s&@abs_builddir@&$ac_abs_builddir&;t t
-s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
-$ac_datarootdir_hack
-" $ac_file_inputs | sed -f "$tmp/subs-1.sed" >$tmp/out
-
-test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
-  { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
-  { ac_out=`sed -n '/^[	 ]*datarootdir[	 ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
-  { echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined.  Please make sure it is defined." >&5
-echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined.  Please make sure it is defined." >&2;}
-
-  rm -f "$tmp/stdin"
-  case $ac_file in
-  -) cat "$tmp/out"; rm -f "$tmp/out";;
-  *) rm -f "$ac_file"; mv "$tmp/out" $ac_file;;
-  esac
- ;;
-  :H)
-  #
-  # CONFIG_HEADER
-  #
-_ACEOF
-
-# Transform confdefs.h into a sed script `conftest.defines', that
-# substitutes the proper values into config.h.in to produce config.h.
-rm -f conftest.defines conftest.tail
-# First, append a space to every undef/define line, to ease matching.
-echo 's/$/ /' >conftest.defines
-# Then, protect against being on the right side of a sed subst, or in
-# an unquoted here document, in config.status.  If some macros were
-# called several times there might be several #defines for the same
-# symbol, which is useless.  But do not sort them, since the last
-# AC_DEFINE must be honored.
-ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
-# These sed commands are passed to sed as "A NAME B PARAMS C VALUE D", where
-# NAME is the cpp macro being defined, VALUE is the value it is being given.
-# PARAMS is the parameter list in the macro definition--in most cases, it's
-# just an empty string.
-ac_dA='s,^\\([	 #]*\\)[^	 ]*\\([	 ]*'
-ac_dB='\\)[	 (].*,\\1define\\2'
-ac_dC=' '
-ac_dD=' ,'
-
-uniq confdefs.h |
-  sed -n '
-	t rset
-	:rset
-	s/^[	 ]*#[	 ]*define[	 ][	 ]*//
-	t ok
-	d
-	:ok
-	s/[\\&,]/\\&/g
-	s/^\('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/ '"$ac_dA"'\1'"$ac_dB"'\2'"${ac_dC}"'\3'"$ac_dD"'/p
-	s/^\('"$ac_word_re"'\)[	 ]*\(.*\)/'"$ac_dA"'\1'"$ac_dB$ac_dC"'\2'"$ac_dD"'/p
-  ' >>conftest.defines
-
-# Remove the space that was appended to ease matching.
-# Then replace #undef with comments.  This is necessary, for
-# example, in the case of _POSIX_SOURCE, which is predefined and required
-# on some systems where configure will not decide to define it.
-# (The regexp can be short, since the line contains either #define or #undef.)
-echo 's/ $//
-s,^[	 #]*u.*,/* & */,' >>conftest.defines
-
-# Break up conftest.defines:
-ac_max_sed_lines=50
-
-# First sed command is:	 sed -f defines.sed $ac_file_inputs >"$tmp/out1"
-# Second one is:	 sed -f defines.sed "$tmp/out1" >"$tmp/out2"
-# Third one will be:	 sed -f defines.sed "$tmp/out2" >"$tmp/out1"
-# et cetera.
-ac_in='$ac_file_inputs'
-ac_out='"$tmp/out1"'
-ac_nxt='"$tmp/out2"'
-
-while :
-do
-  # Write a here document:
-    cat >>$CONFIG_STATUS <<_ACEOF
-    # First, check the format of the line:
-    cat >"\$tmp/defines.sed" <<\\CEOF
-/^[	 ]*#[	 ]*undef[	 ][	 ]*$ac_word_re[	 ]*\$/b def
-/^[	 ]*#[	 ]*define[	 ][	 ]*$ac_word_re[(	 ]/b def
-b
-:def
-_ACEOF
-  sed ${ac_max_sed_lines}q conftest.defines >>$CONFIG_STATUS
-  echo 'CEOF
-    sed -f "$tmp/defines.sed"' "$ac_in >$ac_out" >>$CONFIG_STATUS
-  ac_in=$ac_out; ac_out=$ac_nxt; ac_nxt=$ac_in
-  sed 1,${ac_max_sed_lines}d conftest.defines >conftest.tail
-  grep . conftest.tail >/dev/null || break
-  rm -f conftest.defines
-  mv conftest.tail conftest.defines
-done
-rm -f conftest.defines conftest.tail
-
-echo "ac_result=$ac_in" >>$CONFIG_STATUS
-cat >>$CONFIG_STATUS <<\_ACEOF
-  if test x"$ac_file" != x-; then
-    echo "/* $configure_input  */" >"$tmp/config.h"
-    cat "$ac_result" >>"$tmp/config.h"
-    if diff $ac_file "$tmp/config.h" >/dev/null 2>&1; then
-      { echo "$as_me:$LINENO: $ac_file is unchanged" >&5
-echo "$as_me: $ac_file is unchanged" >&6;}
-    else
-      rm -f $ac_file
-      mv "$tmp/config.h" $ac_file
-    fi
-  else
-    echo "/* $configure_input  */"
-    cat "$ac_result"
-  fi
-  rm -f "$tmp/out12"
- ;;
-
-
-  esac
-
-done # for ac_tag
-
-
-{ (exit 0); exit 0; }
-_ACEOF
-chmod +x $CONFIG_STATUS
-ac_clean_files=$ac_clean_files_save
-
-
-# configure is writing to config.log, and then calls config.status.
-# config.status does its own redirection, appending to config.log.
-# Unfortunately, on DOS this fails, as config.log is still kept open
-# by configure, so config.status won't be able to write to it; its
-# output is simply discarded.  So we exec the FD to /dev/null,
-# effectively closing config.log, so it can be properly (re)opened and
-# appended to by config.status.  When coming back to configure, we
-# need to make the FD available again.
-if test "$no_create" != yes; then
-  ac_cs_success=:
-  ac_config_status_args=
-  test "$silent" = yes &&
-    ac_config_status_args="$ac_config_status_args --quiet"
-  exec 5>/dev/null
-  $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
-  exec 5>>config.log
-  # Use ||, not &&, to avoid exiting from the if with $? = 1, which
-  # would make configure fail if this is the last instruction.
-  $ac_cs_success || { (exit 1); exit 1; }
-fi
-
-
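The deleted lines above are the tail of the checked-in `configure` script (generated by GNU Autoconf 2.61) that this commit drops from version control, presumably so it can be regenerated at build time. Its AC_FUNC_MALLOC and AC_FUNC_REALLOC probes remap malloc to rpl_malloc (and realloc to rpl_realloc) whenever malloc(0) can return NULL. As a minimal sketch — conventional autoconf practice, not code from this patch — this is the kind of replacement such a probe expects the project to ship via LIBOBJS:

#include <stdlib.h>
#undef malloc   /* configure emitted: #define malloc rpl_malloc */

/* Sketch of the conventional rpl_malloc replacement linked in when the
 * probe above finds malloc(0) == NULL; it guarantees a unique, freeable
 * pointer even for zero-byte requests, matching glibc behavior. */
void *rpl_malloc(size_t n)
{
  if (n == 0)
    n = 1;
  return malloc(n);
}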

+ 21 - 32
src/c++/task-controller/configure.ac

@@ -1,7 +1,3 @@
-#                                               -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-
-#
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -18,49 +14,42 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
+#                                               -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
 
 AC_PREREQ(2.59)
-AC_INIT([task-controller],[0.1])
+AC_INIT(linux-task-controller, 1.0.0, mapreduce-dev@hadoop.apache.org)
+AC_GNU_SOURCE
+AC_SYS_LARGEFILE
+
+AM_INIT_AUTOMAKE([subdir-objects foreign no-dist])
+
+AC_CONFIG_SRCDIR([impl/task-controller.c])
+AC_CONFIG_FILES([Makefile])
 
-#changing default prefix value to empty string, so that binary does not
-#gets installed within system
-AC_PREFIX_DEFAULT(.)
+AC_PREFIX_DEFAULT(`pwd`/../install)
 
-#add new argument called -with-confdir
-AC_ARG_WITH(confdir,[--with-confdir path to hadoop conf dir])
-AC_CONFIG_SRCDIR([task-controller.h])
-AC_CONFIG_HEADER([configuration.h])
+CHECK_INSTALL_CFLAG
+HADOOP_UTILS_SETUP
 
 # Checks for programs.
 AC_PROG_CC
+AM_PROG_CC_C_O
+AC_PROG_LIBTOOL
 
 # Checks for libraries.
 
 # Checks for header files.
-AC_HEADER_STDC
-AC_CHECK_HEADERS([stdlib.h string.h unistd.h fcntl.h])
-
-#check for HADOOP_CONF_DIR
+AC_LANG(C)
+AC_CHECK_HEADERS([unistd.h])
 
-
-if test "$with_confdir" != ""
-then
-AC_DEFINE_UNQUOTED(HADOOP_CONF_DIR,"$with_confdir")
-fi
 # Checks for typedefs, structures, and compiler characteristics.
+AC_HEADER_STDBOOL
 AC_C_CONST
-AC_TYPE_PID_T
-AC_TYPE_MODE_T
+AC_TYPE_OFF_T
 AC_TYPE_SIZE_T
+AC_FUNC_STRERROR_R
 
 # Checks for library functions.
-AC_FUNC_MALLOC
-AC_FUNC_REALLOC
-AC_FUNC_CHOWN
-AC_CHECK_FUNCS([strerror memset mkdir rmdir strdup])
-
-AC_CONFIG_FILES([Makefile])
+AC_CHECK_FUNCS([mkdir uname])
 AC_OUTPUT
-
-AC_HEADER_STDBOOL
-AC_PROG_MAKE_SET
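The rewritten configure.ac keeps only the probes the new sources need — AC_CHECK_HEADERS([unistd.h]) and AC_CHECK_FUNCS([mkdir uname]) — each of which emits a HAVE_* macro into the generated config header. A sketch of how C code typically consumes those macros; the config.h name and the fallback branch are illustrative assumptions, not part of this commit:

/* Illustrative only: consuming the HAVE_* macros that AC_CHECK_HEADERS
 * and AC_CHECK_FUNCS define; config.h is the conventional header name. */
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

int create_private_dir(const char *path)
{
#ifdef HAVE_MKDIR
  return mkdir(path, S_IRWXU);  /* probed by AC_CHECK_FUNCS([mkdir uname]) */
#else
  fprintf(stderr, "mkdir() unavailable; cannot create %s\n", path);
  return -1;
#endif
}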

+ 106 - 49
src/c++/task-controller/configuration.c → src/c++/task-controller/impl/configuration.c

@@ -16,10 +16,32 @@
  * limitations under the License.
  */
 
+// ensure we get the posix version of dirname by including this first
+#include <libgen.h> 
+
 #include "configuration.h"
+#include "task-controller.h"
+
+#include <errno.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
 
+#define INCREMENT_SIZE 1000
+#define MAX_SIZE 10
 
-char * hadoop_conf_dir;
+struct confentry {
+  const char *key;
+  const char *value;
+};
+
+struct configuration {
+  int size;
+  struct confentry **confdetails;
+};
 
 struct configuration config={.size=0, .confdetails=NULL};
 
@@ -41,39 +63,69 @@ void free_configurations() {
   config.size = 0;
 }
 
+/**
+ * Is the file/directory only writable by root.
+ * Returns 1 if true
+ */
+static int is_only_root_writable(const char *file) {
+  struct stat file_stat;
+  if (stat(file, &file_stat) != 0) {
+    fprintf(LOGFILE, "Can't stat file %s - %s\n", file, strerror(errno));
+    return 0;
+  }
+  if (file_stat.st_uid != 0) {
+    fprintf(LOGFILE, "File %s must be owned by root, but is owned by %d\n",
+            file, file_stat.st_uid);
+    return 0;
+  }
+  if ((file_stat.st_mode & (S_IWGRP | S_IWOTH)) != 0) {
+    fprintf(LOGFILE, 
+	    "File %s must not be world or group writable, but is %03o\n",
+	    file, file_stat.st_mode & (~S_IFMT));
+    return 0;
+  }
+  return 1;
+}
+
+/**
+ * Ensure that the configuration file and all of the containing directories
+ * are only writable by root. Otherwise, an attacker can change the 
+ * configuration and potentially cause damage.
+ * returns 0 if permissions are ok
+ */
+int check_configuration_permissions(const char* file_name) {
+  // copy the input so that we can modify it with dirname
+  char* dir = strdup(file_name);
+  char* buffer = dir;
+  do {
+    if (!is_only_root_writable(dir)) {
+      free(buffer);
+      return -1;
+    }
+    dir = dirname(dir);
+  } while (strcmp(dir, "/") != 0);
+  free(buffer);
+  return 0;
+}
+
 //function used to load the configurations present in the secure config
-void get_configs() {
+void read_config(const char* file_name) {
+  fprintf(LOGFILE, "Reading task controller config from %s\n" , file_name);
   FILE *conf_file;
   char *line;
   char *equaltok;
   char *temp_equaltok;
   size_t linesize = 1000;
   int size_read = 0;
-  int str_len = 0;
-  char *file_name = NULL;
-
-#ifndef HADOOP_CONF_DIR
-  str_len = strlen(CONF_FILE_PATTERN) + strlen(hadoop_conf_dir);
-  file_name = (char *) malloc(sizeof(char) * (str_len + 1));
-#else
-  str_len = strlen(CONF_FILE_PATTERN) + strlen(HADOOP_CONF_DIR);
-  file_name = (char *) malloc(sizeof(char) * (str_len + 1));
-#endif
 
   if (file_name == NULL) {
-    fprintf(LOGFILE, "Malloc failed :Out of memory \n");
-    return;
+    fprintf(LOGFILE, "Null configuration filename passed in\n");
+    exit(INVALID_CONFIG_FILE);
   }
-  memset(file_name,'\0',str_len +1);
-#ifndef HADOOP_CONF_DIR
-  snprintf(file_name,str_len, CONF_FILE_PATTERN, hadoop_conf_dir);
-#else
-  snprintf(file_name, str_len, CONF_FILE_PATTERN, HADOOP_CONF_DIR);
-#endif
 
-#ifdef DEBUG
-  fprintf(LOGFILE, "get_configs :Conf file name is : %s \n", file_name);
-#endif
+  #ifdef DEBUG
+    fprintf(LOGFILE, "read_config :Conf file name is : %s \n", file_name);
+  #endif
 
   //allocate space for ten configuration items.
   config.confdetails = (struct confentry **) malloc(sizeof(struct confentry *)
@@ -82,14 +134,13 @@ void get_configs() {
   conf_file = fopen(file_name, "r");
   if (conf_file == NULL) {
     fprintf(LOGFILE, "Invalid conf file provided : %s \n", file_name);
-    free(file_name);
-    return;
+    exit(INVALID_CONFIG_FILE);
   }
   while(!feof(conf_file)) {
     line = (char *) malloc(linesize);
     if(line == NULL) {
       fprintf(LOGFILE, "malloc failed while reading configuration file.\n");
-      goto cleanup;
+      exit(OUT_OF_MEMORY);
     }
     size_read = getline(&line,&linesize,conf_file);
     //feof returns true only after we read past EOF.
@@ -98,8 +149,9 @@ void get_configs() {
     if (size_read == -1) {
       if(!feof(conf_file)){
         fprintf(LOGFILE, "getline returned error.\n");
-        goto cleanup;
+        exit(INVALID_CONFIG_FILE);
       }else {
+        free(line);
         break;
       }
     }
@@ -125,9 +177,9 @@ void get_configs() {
       goto cleanup;
     }
 
-#ifdef DEBUG
-    fprintf(LOGFILE, "get_configs : Adding conf key : %s \n", equaltok);
-#endif
+    #ifdef DEBUG
+      fprintf(LOGFILE, "read_config : Adding conf key : %s \n", equaltok);
+    #endif
 
     memset(config.confdetails[config.size], 0, sizeof(struct confentry));
     config.confdetails[config.size]->key = (char *) malloc(
@@ -146,9 +198,9 @@ void get_configs() {
       continue;
     }
 
-#ifdef DEBUG
-    fprintf(LOGFILE, "get_configs : Adding conf value : %s \n", equaltok);
-#endif
+    #ifdef DEBUG
+      fprintf(LOGFILE, "read_config : Adding conf value : %s \n", equaltok);
+    #endif
 
     config.confdetails[config.size]->value = (char *) malloc(
             sizeof(char) * (strlen(equaltok)+1));
@@ -169,8 +221,12 @@ void get_configs() {
 
   //close the file
   fclose(conf_file);
+
+  if (config.size == 0) {
+    fprintf(LOGFILE, "Invalid configuration provided in %s\n", file_name);
+    exit(INVALID_CONFIG_FILE);
+  }
   //clean up allocated file name
-  free(file_name);
   return;
   //free spaces alloced.
   cleanup:
@@ -178,7 +234,6 @@ void get_configs() {
     free(line);
   }
   fclose(conf_file);
-  free(file_name);
   free_configurations();
   return;
 }
@@ -189,15 +244,8 @@ void get_configs() {
  * array, next time onwards used the populated array.
  *
  */
-const char * get_value(const char* key) {
+char * get_value(const char* key) {
   int count;
-  if (config.size == 0) {
-    get_configs();
-  }
-  if (config.size == 0) {
-    fprintf(LOGFILE, "Invalid configuration provided\n");
-    return NULL;
-  }
   for (count = 0; count < config.size; count++) {
     if (strcmp(config.confdetails[count]->key, key) == 0) {
       return strdup(config.confdetails[count]->value);
@@ -210,9 +258,9 @@ const char * get_value(const char* key) {
  * Function to return an array of values for a key.
  * Value delimiter is assumed to be a comma.
  */
-const char ** get_values(const char * key) {
-  const char ** toPass = NULL;
-  const char *value = get_value(key);
+char ** get_values(const char * key) {
+  char ** toPass = NULL;
+  char *value = get_value(key);
   char *tempTok = NULL;
   char *tempstr = NULL;
   int size = 0;
@@ -220,14 +268,14 @@ const char ** get_values(const char * key) {
 
   //first allocate any array of 10
   if(value != NULL) {
-    toPass = (const char **) malloc(sizeof(char *) * toPassSize);
+    toPass = (char **) malloc(sizeof(char *) * toPassSize);
     tempTok = strtok_r((char *)value, ",", &tempstr);
     while (tempTok != NULL) {
       toPass[size++] = tempTok;
       if(size == toPassSize) {
         toPassSize += MAX_SIZE;
-        toPass = (const char **) realloc(toPass,(sizeof(char *) *
-                                         (MAX_SIZE * toPassSize)));
+        toPass = (char **) realloc(toPass,(sizeof(char *) *
+                                           (MAX_SIZE * toPassSize)));
       }
       tempTok = strtok_r(NULL, ",", &tempstr);
     }
@@ -238,3 +286,12 @@ const char ** get_values(const char * key) {
   return toPass;
 }
 
+// free the array returned by get_values and the value buffer it points into
+void free_values(char** values) {
+  if (values != NULL) {
+    if (*values != NULL) {
+      // all entries point into the single strdup'd buffer at values[0]
+      free(*values);
+    }
+    free(values);
+  }
+}
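
Putting the new configuration API together: read_config() parses the flat key=value file once and exits on failure, get_value()/get_values() hand back heap copies, and free_values() releases both the array and the value buffer behind it. A minimal hypothetical caller (the path and keys are illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include "configuration.h"

    FILE *LOGFILE;   /* normally defined in task-controller.c */

    int main(void) {
      LOGFILE = stdout;
      read_config("/etc/hadoop/taskcontroller.cfg");  /* exits if missing or empty */
      char *group = get_value("mapreduce.tasktracker.group");  /* strdup'd copy */
      char **dirs = get_values("mapred.local.dir");   /* comma-separated values */
      if (group != NULL) {
        printf("tasktracker group: %s\n", group);
        free(group);
      }
      if (dirs != NULL) {
        free_values(dirs);
      }
      free_configurations();
      return 0;
    }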

+ 16 - 36
src/c++/task-controller/configuration.h.in → src/c++/task-controller/impl/configuration.h

@@ -16,47 +16,27 @@
  * limitations under the License.
  */
 
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-
-#define INCREMENT_SIZE 1000
-#define MAX_SIZE 10
-
-//would be defined by the autoconfiguration tool
-#undef HADOOP_CONF_DIR
-
-struct confentry {
-  const char *key;
-  const char *value;
-};
+/**
+ * Ensure that the configuration file and all of the containing directories
+ * are only writable by root. Otherwise, an attacker can change the 
+ * configuration and potentially cause damage.
+ * returns 0 if permissions are ok
+ */
+int check_configuration_permissions(const char* file_name);
 
+// read the given configuration file
+void read_config(const char* config_file);
 
-struct configuration {
-  int size;
-  struct confentry **confdetails;
-};
+//method exposed to get the configurations
+char *get_value(const char* key);
 
-FILE *LOGFILE;
+//function to return the array of values for a key. Values are
+//comma separated strings.
+char ** get_values(const char* key);
 
-#ifdef HADOOP_CONF_DIR
-  #define CONF_FILE_PATTERN "%s/taskcontroller.cfg"
-#else
-  #define CONF_FILE_PATTERN "%s/conf/taskcontroller.cfg"
-#endif
+// free the memory returned by get_values
+void free_values(char** values);
 
-extern struct configuration config;
-//configuration file contents
-#ifndef HADOOP_CONF_DIR
-  extern char *hadoop_conf_dir;
-#endif
-//method exposed to get the configurations
-const char * get_value(const char* key);
 //method to free allocated configuration
 void free_configurations();
 
-//function to return array of values pointing to the key. Values are
-//comma seperated strings.
-const char ** get_values(const char* key);
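
For reference, the parsed file is a flat list of key=value pairs. With the keys used elsewhere in this patch, a taskcontroller.cfg might look like this (the values are illustrative):

    mapreduce.tasktracker.group=hadoop
    mapred.local.dir=/disk1/mapred/local,/disk2/mapred/local
    hadoop.log.dir=/var/log/hadoop
    min.user.id=1000
    banned.users=mapred,hdfs,bin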

+ 191 - 0
src/c++/task-controller/impl/main.c

@@ -0,0 +1,191 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "configuration.h"
+#include "task-controller.h"
+
+#include <errno.h>
+#include <grp.h>
+#include <limits.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+
+#define _STRINGIFY(X) #X
+#define STRINGIFY(X) _STRINGIFY(X)
+#define CONF_FILENAME "taskcontroller.cfg"
+
+void display_usage(FILE *stream) {
+  fprintf(stream,
+      "Usage: task-controller user command command-args\n");
+  fprintf(stream, "Commands:\n");
+  fprintf(stream, "   initialize job: %2d jobid credentials jobconf cmd args\n",
+	  INITIALIZE_JOB);
+  fprintf(stream, "   launch task:    %2d jobid taskid workdir task-script\n",
+	  LAUNCH_TASK_JVM);
+  fprintf(stream, "   signal task:    %2d task-pid signal\n",
+	  SIGNAL_TASK);
+  fprintf(stream, "   delete as user: %2d relative-path\n",
+	  DELETE_AS_USER);
+  fprintf(stream, "   delete log:     %2d relative-path\n",
+	  DELETE_LOG_AS_USER);
+}
+
+int main(int argc, char **argv) {
+  //Minimum number of arguments required to run the task-controller
+  if (argc < 4) {
+    display_usage(stdout);
+    return INVALID_ARGUMENT_NUMBER;
+  }
+
+  LOGFILE = stdout;
+  int command;
+  const char * job_id = NULL;
+  const char * task_id = NULL;
+  const char * cred_file = NULL;
+  const char * script_file = NULL;
+  const char * current_dir = NULL;
+  const char * job_xml = NULL;
+
+  int exit_code = 0;
+
+  char * dir_to_be_deleted = NULL;
+
+  char *executable_file = get_executable();
+
+#ifndef HADOOP_CONF_DIR
+  #error HADOOP_CONF_DIR must be defined
+#endif
+
+  char *orig_conf_file = STRINGIFY(HADOOP_CONF_DIR) "/" CONF_FILENAME;
+  char *conf_file = realpath(orig_conf_file, NULL);
+
+  if (conf_file == NULL) {
+    fprintf(LOGFILE, "Configuration file %s not found.\n", orig_conf_file);
+    return INVALID_CONFIG_FILE;
+  }
+  if (check_configuration_permissions(conf_file) != 0) {
+    return INVALID_CONFIG_FILE;
+  }
+  read_config(conf_file);
+  free(conf_file);
+
+  // look up the task tracker group in the config file
+  char *tt_group = get_value(TT_GROUP_KEY);
+  if (tt_group == NULL) {
+    fprintf(LOGFILE, "Can't get configured value for %s.\n", TT_GROUP_KEY);
+    exit(INVALID_CONFIG_FILE);
+  }
+  struct group *group_info = getgrnam(tt_group);
+  if (group_info == NULL) {
+    fprintf(LOGFILE, "Can't get group information for %s - %s.\n", tt_group,
+            strerror(errno));
+    exit(INVALID_CONFIG_FILE);
+  }
+  set_tasktracker_uid(getuid(), group_info->gr_gid);
+  // if we are running from a setuid executable, make the real uid root
+  setuid(0);
+  // set the real and effective group id to the task tracker group
+  setgid(group_info->gr_gid);
+
+  if (check_taskcontroller_permissions(executable_file) != 0) {
+    fprintf(LOGFILE, "Invalid permissions on task-controller binary.\n");
+    return INVALID_TASKCONTROLLER_PERMISSIONS;
+  }
+
+  //checks done for user name
+  if (argv[optind] == NULL) {
+    fprintf(LOGFILE, "Invalid user name \n");
+    return INVALID_USER_NAME;
+  }
+  int ret = set_user(argv[optind]);
+  if (ret != 0) {
+    return ret;
+  }
+
+  optind = optind + 1;
+  command = atoi(argv[optind++]);
+
+  fprintf(LOGFILE, "main : command provided %d\n",command);
+  fprintf(LOGFILE, "main : user is %s\n", user_detail->pw_name);
+
+  switch (command) {
+  case INITIALIZE_JOB:
+    if (argc < 7) {
+      fprintf(LOGFILE, "Too few arguments (%d vs 7) for initialize job\n",
+	      argc);
+      return INVALID_ARGUMENT_NUMBER;
+    }
+    job_id = argv[optind++];
+    cred_file = argv[optind++];
+    job_xml = argv[optind++];
+    exit_code = initialize_job(user_detail->pw_name, job_id, cred_file,
+                               job_xml, argv + optind);
+    break;
+  case LAUNCH_TASK_JVM:
+    if (argc < 7) {
+      fprintf(LOGFILE, "Too few arguments (%d vs 7) for launch task\n",
+	      argc);
+      return INVALID_ARGUMENT_NUMBER;
+    }
+    job_id = argv[optind++];
+    task_id = argv[optind++];
+    current_dir = argv[optind++];
+    script_file = argv[optind++];
+    exit_code = run_task_as_user(user_detail->pw_name, job_id, task_id, 
+                                 current_dir, script_file);
+    break;
+  case SIGNAL_TASK:
+    if (argc < 5) {
+      fprintf(LOGFILE, "Too few arguments (%d vs 5) for signal task\n",
+	      argc);
+      return INVALID_ARGUMENT_NUMBER;
+    } else {
+      char* end_ptr = NULL;
+      char* option = argv[optind++];
+      int task_pid = strtol(option, &end_ptr, 10);
+      if (option == end_ptr || *end_ptr != '\0') {
+        fprintf(LOGFILE, "Illegal argument for task pid %s\n", option);
+        return INVALID_ARGUMENT_NUMBER;
+      }
+      option = argv[optind++];
+      int signal = strtol(option, &end_ptr, 10);
+      if (option == end_ptr || *end_ptr != '\0') {
+        fprintf(LOGFILE, "Illegal argument for signal %s\n", option);
+        return INVALID_ARGUMENT_NUMBER;
+      }
+      exit_code = signal_user_task(user_detail->pw_name, task_pid, signal);
+    }
+    break;
+  case DELETE_AS_USER:
+    dir_to_be_deleted = argv[optind++];
+    exit_code= delete_as_user(user_detail->pw_name, dir_to_be_deleted);
+    break;
+  case DELETE_LOG_AS_USER:
+    dir_to_be_deleted = argv[optind++];
+    exit_code= delete_as_user(user_detail->pw_name, dir_to_be_deleted);
+    break;
+  default:
+    exit_code = INVALID_COMMAND_PROVIDED;
+  }
+  fclose(LOGFILE);
+  return exit_code;
+}
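
The two-level STRINGIFY idiom above lets the build pass HADOOP_CONF_DIR as a bare token: the inner macro expands it before the outer one #-quotes it. A self-contained sketch with a hypothetical value:

    #include <stdio.h>

    #define _STRINGIFY(X) #X
    #define STRINGIFY(X) _STRINGIFY(X)

    /* stand-in for -DHADOOP_CONF_DIR=/etc/hadoop on the compile line */
    #define HADOOP_CONF_DIR /etc/hadoop

    int main(void) {
      /* adjacent string literals concatenate at compile time */
      puts(STRINGIFY(HADOOP_CONF_DIR) "/" "taskcontroller.cfg");
      /* prints: /etc/hadoop/taskcontroller.cfg */
      return 0;
    }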

+ 1044 - 0
src/c++/task-controller/impl/task-controller.c

@@ -0,0 +1,1044 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "configuration.h"
+#include "task-controller.h"
+
+#include <dirent.h>
+#include <fcntl.h>
+#include <fts.h>
+#include <errno.h>
+#include <grp.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+
+#define USER_DIR_PATTERN "%s/taskTracker/%s"
+
+#define TT_JOB_DIR_PATTERN USER_DIR_PATTERN "/jobcache/%s"
+
+#define ATTEMPT_DIR_PATTERN TT_JOB_DIR_PATTERN "/%s/work"
+
+#define TASK_SCRIPT "taskjvm.sh"
+
+#define TT_LOCAL_TASK_DIR_PATTERN    "%s/taskTracker/%s/jobcache/%s/%s"
+
+#define TT_SYS_DIR_KEY "mapred.local.dir"
+
+#define TT_LOG_DIR_KEY "hadoop.log.dir"
+
+#define JOB_FILENAME "job.xml"
+
+#define CREDENTIALS_FILENAME "jobToken"
+
+#define MIN_USERID_KEY "min.user.id"
+
+static const int DEFAULT_MIN_USERID = 1000;
+
+#define BANNED_USERS_KEY "banned.users"
+
+static const char* DEFAULT_BANNED_USERS[] = {"mapred", "hdfs", "bin", 0};
+
+//struct to store the user details
+struct passwd *user_detail = NULL;
+
+FILE* LOGFILE = NULL;
+
+static uid_t tt_uid = -1;
+static gid_t tt_gid = -1;
+
+void set_tasktracker_uid(uid_t user, gid_t group) {
+  tt_uid = user;
+  tt_gid = group;
+}
+
+/**
+ * get the executable filename.
+ */
+char* get_executable() {
+  char buffer[PATH_MAX];
+  snprintf(buffer, PATH_MAX, "/proc/%u/exe", getpid());
+  char *filename = malloc(PATH_MAX);
+  ssize_t len = readlink(buffer, filename, PATH_MAX);
+  if (len == -1) {
+    fprintf(stderr, "Can't get executable name from %s - %s\n", buffer,
+            strerror(errno));
+    exit(-1);
+  } else if (len >= PATH_MAX) {
+    fprintf(LOGFILE, "Executable name %.*s is longer than %d characters.\n",
+            PATH_MAX, filename, PATH_MAX);
+    exit(-1);
+  }
+  filename[len] = '\0';
+  return filename;
+}
+
+/**
+ * Check the permissions on taskcontroller to make sure that security can
+ * be enforced. For this, we need the task-controller binary to
+ *    * be user-owned by root
+ *    * be group-owned by a configured special group.
+ *    * others do not have any permissions
+ *    * be setuid/setgid
+ */
+int check_taskcontroller_permissions(char *executable_file) {
+
+  errno = 0;
+  char * resolved_path = realpath(executable_file, NULL);
+  if (resolved_path == NULL) {
+    fprintf(LOGFILE,
+        "Error resolving the canonical name for the executable : %s!",
+        strerror(errno));
+    return -1;
+  }
+
+  struct stat filestat;
+  errno = 0;
+  if (stat(resolved_path, &filestat) != 0) {
+    fprintf(LOGFILE, 
+            "Could not stat the executable : %s!.\n", strerror(errno));
+    return -1;
+  }
+
+  uid_t binary_euid = filestat.st_uid; // Binary's user owner
+  gid_t binary_gid = filestat.st_gid; // Binary's group owner
+
+  // The owner of the binary should be root
+  if (binary_euid != 0) {
+    fprintf(LOGFILE,
+        "The task-controller binary should be user-owned by root.\n");
+    return -1;
+  }
+
+  if (binary_gid != getgid()) {
+    fprintf(LOGFILE, "The configured tasktracker group %d is different from"
+            " the group of the executable %d\n", getgid(), binary_gid);
+    return -1;
+  }
+
+  // check others do not have read/write/execute permissions
+  if ((filestat.st_mode & S_IROTH) == S_IROTH || (filestat.st_mode & S_IWOTH)
+      == S_IWOTH || (filestat.st_mode & S_IXOTH) == S_IXOTH) {
+    fprintf(LOGFILE,
+            "The task-controller binary should not have read or write or"
+            " execute for others.\n");
+    return -1;
+  }
+
+  // Binary should be setuid/setgid executable
+  if ((filestat.st_mode & S_ISUID) == 0) {
+    fprintf(LOGFILE, "The task-controller binary should be set setuid.\n");
+    return -1;
+  }
+
+  return 0;
+}
+
+/**
+ * Change the effective user id to limit damage.
+ */
+static int change_effective_user(uid_t user, gid_t group) {
+  if (geteuid() == user) {
+    return 0;
+  }
+  if (seteuid(0) != 0) {
+    return -1;
+  }
+  if (setegid(group) != 0) {
+    fprintf(LOGFILE, "Failed to set effective group id %d - %s\n", group,
+            strerror(errno));
+    return -1;
+  }
+  if (seteuid(user) != 0) {
+    fprintf(LOGFILE, "Failed to set effective user id %d - %s\n", user,
+            strerror(errno));
+    return -1;
+  }
+  return 0;
+}
+
+/**
+ * Change the real and effective user and group to abandon the super user
+ * privileges.
+ */
+int change_user(uid_t user, gid_t group) {
+  if (user == getuid() && user == geteuid() && 
+      group == getgid() && group == getegid()) {
+    return 0;
+  }
+
+  if (seteuid(0) != 0) {
+    fprintf(LOGFILE, "unable to reacquire root - %s\n", strerror(errno));
+    fprintf(LOGFILE, "Real: %d:%d; Effective: %d:%d\n",
+	    getuid(), getgid(), geteuid(), getegid());
+    return SETUID_OPER_FAILED;
+  }
+  if (setgid(group) != 0) {
+    fprintf(LOGFILE, "unable to set group to %d - %s\n", group, 
+            strerror(errno));
+    fprintf(LOGFILE, "Real: %d:%d; Effective: %d:%d\n",
+	    getuid(), getgid(), geteuid(), getegid());
+    return SETUID_OPER_FAILED;
+  }
+  if (setuid(user) != 0) {
+    fprintf(LOGFILE, "unable to set user to %d - %s\n", user, strerror(errno));
+    fprintf(LOGFILE, "Real: %d:%d; Effective: %d:%d\n",
+	    getuid(), getgid(), geteuid(), getegid());
+    return SETUID_OPER_FAILED;
+  }
+
+  return 0;
+}
+
+/**
+ * Utility function to build a string from a format pattern and arguments.
+ */
+char *concatenate(char *concat_pattern, char *return_path_name, 
+                  int numArgs, ...) {
+  va_list ap;
+  va_start(ap, numArgs);
+  int strlen_args = 0;
+  char *arg = NULL;
+  int j;
+  for (j = 0; j < numArgs; j++) {
+    arg = va_arg(ap, char*);
+    if (arg == NULL) {
+      fprintf(LOGFILE, "One of the arguments passed for %s is null.\n",
+          return_path_name);
+      return NULL;
+    }
+    strlen_args += strlen(arg);
+  }
+  va_end(ap);
+
+  char *return_path = NULL;
+  int str_len = strlen(concat_pattern) + strlen_args + 1;
+
+  return_path = (char *) malloc(str_len);
+  if (return_path == NULL) {
+    fprintf(LOGFILE, "Unable to allocate memory for %s.\n", return_path_name);
+    return NULL;
+  }
+  va_start(ap, numArgs);
+  vsnprintf(return_path, str_len, concat_pattern, ap);
+  va_end(ap);
+  return return_path;
+}
+
+/**
+ * Get the job-directory path from tt_root, user name and job-id
+ */
+char *get_job_directory(const char * tt_root, const char *user,
+                        const char *jobid) {
+  return concatenate(TT_JOB_DIR_PATTERN, "job_dir_path", 3, tt_root, user,
+      jobid);
+}
+
+/**
+ * Get the user directory of a particular user
+ */
+char *get_user_directory(const char *tt_root, const char *user) {
+  return concatenate(USER_DIR_PATTERN, "user_dir_path", 2, tt_root, user);
+}
+
+char *get_job_work_directory(const char *job_dir) {
+  return concatenate("%s/work", "job work", 1, job_dir);
+}
+
+/**
+ * Get the attempt directory for the given attempt_id
+ */
+char *get_attempt_work_directory(const char *tt_root, const char *user,
+				 const char *job_id, const char *attempt_id) {
+  return concatenate(ATTEMPT_DIR_PATTERN, "attempt_dir_path", 4,
+                     tt_root, user, job_id, attempt_id);
+}
+
+char *get_task_launcher_file(const char* work_dir) {
+  return concatenate("%s/%s", "task launcher", 2, work_dir, TASK_SCRIPT);
+}
+
+/**
+ * Get the job log directory.
+ * Ensures that the result is a realpath and that it is underneath the 
+ * tt log root.
+ */
+char* get_job_log_directory(const char* jobid) {
+  char* log_dir = get_value(TT_LOG_DIR_KEY);
+  if (log_dir == NULL) {
+    fprintf(LOGFILE, "Log directory %s is not configured.\n", TT_LOG_DIR_KEY);
+    return NULL;
+  }
+  char *result = concatenate("%s/userlogs/%s", "job log dir", 2, log_dir, 
+                             jobid);
+  if (result == NULL) {
+    fprintf(LOGFILE, "failed to get memory in get_job_log_directory for %s"
+            " and %s\n", log_dir, jobid);
+  }
+  free(log_dir);
+  return result;
+}
+
+/*
+ * Get a user subdirectory.
+ */
+char *get_user_subdirectory(const char *tt_root,
+                            const char *user,
+                            const char *subdir) {
+  char * user_dir = get_user_directory(tt_root, user);
+  char * result = concatenate("%s/%s", "user subdir", 2,
+                              user_dir, subdir);
+  free(user_dir);
+  return result;
+}
+
+/**
+ * Ensure that the given path and all of the parent directories are created
+ * with the desired permissions.
+ */
+int mkdirs(const char* path, mode_t perm) {
+  char *buffer = strdup(path);
+  char *token;
+  int cwd = open("/", O_RDONLY);
+  if (cwd == -1) {
+    fprintf(LOGFILE, "Can't open / in %s - %s\n", path, strerror(errno));
+    free(buffer);
+    return -1;
+  }
+  for(token = strtok(buffer, "/"); token != NULL; token = strtok(NULL, "/")) {
+    if (mkdirat(cwd, token, perm) != 0) {
+      if (errno != EEXIST) {
+        fprintf(LOGFILE, "Can't create directory %s in %s - %s\n", 
+                token, path, strerror(errno));
+        close(cwd);
+        free(buffer);
+        return -1;
+      }
+    }
+    int new_dir = openat(cwd, token, O_RDONLY);
+    close(cwd);
+    cwd = new_dir;
+    if (cwd == -1) {
+      fprintf(LOGFILE, "Can't open %s in %s - %s\n", token, path, 
+              strerror(errno));
+      free(buffer);
+      return -1;
+    }
+  }
+  free(buffer);
+  close(cwd);
+  return 0;
+}
+
+/**
+ * Function to prepare the attempt directories for the task JVM.
+ * It creates the task work and log directories.
+ */
+static int create_attempt_directories(const char* user, const char *job_id, 
+					const char *task_id) {
+  // create dirs as 0750
+  const mode_t perms = S_IRWXU | S_IRGRP | S_IXGRP;
+  if (job_id == NULL || task_id == NULL || user == NULL) {
+    fprintf(LOGFILE, 
+            "Either the job_id, task_id or user passed is null.\n");
+    return -1;
+  }
+  int result = 0;
+
+  char **local_dir = get_values(TT_SYS_DIR_KEY);
+
+  if (local_dir == NULL) {
+    fprintf(LOGFILE, "%s is not configured.\n", TT_SYS_DIR_KEY);
+    return -1;
+  }
+
+  char **local_dir_ptr;
+  for(local_dir_ptr = local_dir; *local_dir_ptr != NULL; ++local_dir_ptr) {
+    char *task_dir = get_attempt_work_directory(*local_dir_ptr, user, job_id, 
+                                                task_id);
+    if (task_dir == NULL) {
+      free_values(local_dir);
+      return -1;
+    }
+    if (mkdirs(task_dir, perms) != 0) {
+      // continue on to create other task directories
+      free(task_dir);
+    } else {
+      free(task_dir);
+    }
+  }
+  free_values(local_dir);
+
+  // also make the directory for the task logs
+  char *job_task_name = malloc(strlen(job_id) + strlen(task_id) + 2);
+  if (job_task_name == NULL) {
+    fprintf(LOGFILE, "Malloc of job task name failed\n");
+    result = -1;
+  } else {
+    sprintf(job_task_name, "%s/%s", job_id, task_id);
+    char *log_dir = get_job_log_directory(job_task_name);
+    free(job_task_name);
+    if (log_dir == NULL) {
+      result = -1;
+    } else if (mkdirs(log_dir, perms) != 0) {
+      result = -1;
+    }
+    free(log_dir);
+  }
+  return result;
+}
+
+/**
+ * Load the user information for a given user name.
+ */
+static struct passwd* get_user_info(const char* user) {
+  int string_size = sysconf(_SC_GETPW_R_SIZE_MAX);
+  void* buffer = malloc(string_size + sizeof(struct passwd));
+  struct passwd *result = NULL;
+  if (getpwnam_r(user, buffer, buffer + sizeof(struct passwd), string_size,
+		 &result) != 0) {
+    free(buffer);
+    fprintf(LOGFILE, "Can't get user information %s - %s\n", user,
+	    strerror(errno));
+    return NULL;
+  }
+  return result;
+}
+
+/**
+ * Is the user a real user account?
+ * Checks:
+ *   1. Not root
+ *   2. UID is above the minimum configured.
+ *   3. Not in banned user list
+ * Returns NULL on failure
+ */
+struct passwd* check_user(const char *user) {
+  if (strcmp(user, "root") == 0) {
+    fprintf(LOGFILE, "Running as root is not allowed\n");
+    return NULL;
+  }
+  char *min_uid_str = get_value(MIN_USERID_KEY);
+  int min_uid = DEFAULT_MIN_USERID;
+  if (min_uid_str != NULL) {
+    char *end_ptr = NULL;
+    min_uid = strtol(min_uid_str, &end_ptr, 10);
+    if (min_uid_str == end_ptr || *end_ptr != '\0') {
+      fprintf(LOGFILE, "Illegal value of %s for %s in configuration\n", 
+	      min_uid_str, MIN_USERID_KEY);
+      free(min_uid_str);
+      return NULL;
+    }
+    free(min_uid_str);
+  }
+  struct passwd *user_info = get_user_info(user);
+  if (NULL == user_info) {
+    fprintf(LOGFILE, "User %s not found\n", user);
+    return NULL;
+  }
+  if (user_info->pw_uid < min_uid) {
+    fprintf(LOGFILE, "Requested user %s has id %d, which is below the "
+	    "minimum allowed %d\n", user, user_info->pw_uid, min_uid);
+    free(user_info);
+    return NULL;
+  }
+  char **banned_users = get_values(BANNED_USERS_KEY);
+  char **banned_user = (banned_users == NULL) ? 
+    (char**) DEFAULT_BANNED_USERS : banned_users;
+  for(; *banned_user; ++banned_user) {
+    if (strcmp(*banned_user, user) == 0) {
+      free(user_info);
+      fprintf(LOGFILE, "Requested user %s is banned\n", user);
+      return NULL;
+    }
+  }
+  if (banned_users != NULL) {
+    free_values(banned_users);
+  }
+  return user_info;
+}
+
+/**
+ * Function used to populate the user_detail structure.
+ */
+int set_user(const char *user) {
+  // free any old user
+  if (user_detail != NULL) {
+    free(user_detail);
+    user_detail = NULL;
+  }
+  user_detail = check_user(user);
+  if (user_detail == NULL) {
+    return -1;
+  }
+  return change_effective_user(user_detail->pw_uid, user_detail->pw_gid);
+}
+
+/**
+ * Change the ownership of the given file or directory to the new user.
+ */
+static int change_owner(const char* path, uid_t user, gid_t group) {
+  if (geteuid() == user && getegid() == group) {
+    return 0;
+  } else {
+    uid_t old_user = geteuid();
+    gid_t old_group = getegid();
+    if (change_effective_user(0, group) != 0) {
+      return -1;
+    }
+    if (chown(path, user, group) != 0) {
+      fprintf(LOGFILE, "Can't chown %s to %d:%d - %s\n", path, user, group,
+	      strerror(errno));
+      return -1;
+    }
+    return change_effective_user(old_user, old_group);
+  }
+}
+
+/**
+ * Create a top level directory for the user.
+ * It assumes that the parent directory is *not* writable by the user.
+ * It creates directories with 02750 permissions owned by the user
+ * and with the group set to the task tracker group.
+ * return non-0 on failure
+ */
+int create_directory_for_user(const char* path) {
+  // set 02750 permissions including the setgid bit
+  mode_t permissions = S_IRWXU | S_IRGRP | S_IXGRP | S_ISGID;
+  uid_t user = geteuid();
+  gid_t group = getegid();
+  int ret = 0;
+  ret = change_effective_user(tt_uid, tt_gid);
+  if (ret == 0) {
+    if (mkdir(path, permissions) == 0) {
+      // need to reassert the setgid bit, which mkdir may have masked
+      if (chmod(path, permissions) != 0) {
+        fprintf(LOGFILE, "Can't chmod %s to add the sticky bit - %s\n",
+                path, strerror(errno));
+        ret = -1;
+      } else if (change_owner(path, user, tt_gid) != 0) {
+        ret = -1;
+      }
+    } else if (errno == EEXIST) {
+      struct stat file_stat;
+      if (stat(path, &file_stat) != 0) {
+        fprintf(LOGFILE, "Can't stat directory %s - %s\n", path, 
+                strerror(errno));
+        ret = -1;
+      } else {
+        if (file_stat.st_uid != user ||
+            file_stat.st_gid != tt_gid) {
+          fprintf(LOGFILE, "Directory %s owned by wrong user or group. "
+                  "Expected %d:%d and found %d:%d.\n",
+                  path, user, tt_gid, file_stat.st_uid, file_stat.st_gid);
+          ret = -1;
+        }
+      }
+    } else {
+      fprintf(LOGFILE, "Failed to create directory %s - %s\n", path,
+              strerror(errno));
+      ret = -1;
+    }
+  }
+  if (change_effective_user(user, group) != 0) {
+    ret = -1;
+  }
+  return ret;
+}
+                            
+/**
+ * Open a file as the tasktracker and return a file descriptor for it.
+ * Returns -1 on error
+ */
+static int open_file_as_task_tracker(const char* filename) {
+  uid_t user = geteuid();
+  gid_t group = getegid();
+  if (change_effective_user(tt_uid, tt_gid) != 0) {
+    return -1;
+  }
+  int result = open(filename, O_RDONLY);
+  if (result == -1) {
+    fprintf(LOGFILE, "Can't open file %s as task tracker - %s\n", filename,
+	    strerror(errno));
+  }
+  if (change_effective_user(user, group)) {
+    result = -1;
+  }
+  return result;
+}
+
+/**
+ * Copy a file from a fd to a given filename.
+ * The new file must not exist and it is created with permissions perm.
+ * The input stream is closed.
+ * Return 0 if everything is ok.
+ */
+static int copy_file(int input, const char* in_filename, 
+		     const char* out_filename, mode_t perm) {
+  const int buffer_size = 128*1024;
+  char buffer[buffer_size];
+  int out_fd = open(out_filename, O_WRONLY|O_CREAT|O_EXCL|O_NOFOLLOW, perm);
+  if (out_fd == -1) {
+    fprintf(LOGFILE, "Can't open %s for output - %s\n", out_filename, 
+            strerror(errno));
+    return -1;
+  }
+  ssize_t len = read(input, buffer, buffer_size);
+  while (len > 0) {
+    ssize_t pos = 0;
+    while (pos < len) {
+      ssize_t write_result = write(out_fd, buffer + pos, len - pos);
+      if (write_result <= 0) {
+	fprintf(LOGFILE, "Error writing to %s - %s\n", out_filename,
+		strerror(errno));
+	close(out_fd);
+	return -1;
+      }
+      pos += write_result;
+    }
+    len = read(input, buffer, buffer_size);
+  }
+  if (len < 0) {
+    fprintf(LOGFILE, "Failed to read file %s - %s\n", in_filename, 
+	    strerror(errno));
+    close(out_fd);
+    return -1;
+  }
+  if (close(out_fd) != 0) {
+    fprintf(LOGFILE, "Failed to close file %s - %s\n", out_filename, 
+	    strerror(errno));
+    return -1;
+  }
+  close(input);
+  return 0;
+}
+
+/**
+ * Function to initialize the user directories of a user.
+ */
+int initialize_user(const char *user) {
+  char **local_dir = get_values(TT_SYS_DIR_KEY);
+  if (local_dir == NULL) {
+    fprintf(LOGFILE, "%s is not configured.\n", TT_SYS_DIR_KEY);
+    return INVALID_TT_ROOT;
+  }
+
+  char *user_dir;
+  char **local_dir_ptr = local_dir;
+  int failed = 0;
+  for(local_dir_ptr = local_dir; *local_dir_ptr != 0; ++local_dir_ptr) {
+    user_dir = get_user_directory(*local_dir_ptr, user);
+    if (user_dir == NULL) {
+      fprintf(LOGFILE, "Couldn't get userdir directory for %s.\n", user);
+      failed = 1;
+      break;
+    }
+    if (create_directory_for_user(user_dir) != 0) {
+      failed = 1;
+    }
+    free(user_dir);
+  }
+  free_values(local_dir);
+  return failed ? INITIALIZE_USER_FAILED : 0;
+}
+
+/**
+ * Function to prepare the job directories for the task JVM.
+ */
+int initialize_job(const char *user, const char *jobid, 
+		   const char* credentials, const char* job_xml,
+                   char* const* args) {
+  if (jobid == NULL || user == NULL) {
+    fprintf(LOGFILE, "Either jobid is null or the user passed is null.\n");
+    return INVALID_ARGUMENT_NUMBER;
+  }
+
+  // create the user directory
+  int result = initialize_user(user);
+  if (result != 0) {
+    return result;
+  }
+
+  // create the log directory for the job
+  char *job_log_dir = get_job_log_directory(jobid);
+  if (job_log_dir == NULL) {
+    return -1;
+  }
+  result = create_directory_for_user(job_log_dir);
+  free(job_log_dir);
+  if (result != 0) {
+    return -1;
+  }
+
+  // open up the credentials file
+  int cred_file = open_file_as_task_tracker(credentials);
+  if (cred_file == -1) {
+    return -1;
+  }
+
+  int job_file = open_file_as_task_tracker(job_xml);
+  if (job_file == -1) {
+    return -1;
+  }
+
+  // give up root privs
+  if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+    return -1;
+  }
+
+  // 750
+  mode_t permissions = S_IRWXU | S_IRGRP | S_IXGRP;
+  char **tt_roots = get_values(TT_SYS_DIR_KEY);
+
+  if (tt_roots == NULL) {
+    return INVALID_CONFIG_FILE;
+  }
+
+  char **tt_root;
+  char *primary_job_dir = NULL;
+  for(tt_root=tt_roots; *tt_root != NULL; ++tt_root) {
+    char *job_dir = get_job_directory(*tt_root, user, jobid);
+    if (job_dir == NULL) {
+      // try the next one
+    } else if (mkdirs(job_dir, permissions) != 0) {
+      free(job_dir);
+    } else if (primary_job_dir == NULL) {
+      primary_job_dir = job_dir;
+    } else {
+      free(job_dir);
+    }
+  }
+  free_values(tt_roots);
+  if (primary_job_dir == NULL) {
+    fprintf(LOGFILE, "Did not create any job directories\n");
+    return -1;
+  }
+
+  char *cred_file_name = concatenate("%s/%s", "cred file", 2,
+				     primary_job_dir, CREDENTIALS_FILENAME);
+  if (cred_file_name == NULL) {
+    return -1;
+  }
+  if (copy_file(cred_file, credentials, cred_file_name, S_IRUSR|S_IWUSR) != 0){
+    return -1;
+  }
+  char *job_file_name = concatenate("%s/%s", "job file", 2,
+				     primary_job_dir, JOB_FILENAME);
+  if (job_file_name == NULL) {
+    return -1;
+  }
+  if (copy_file(job_file, job_xml, job_file_name,
+        S_IRUSR|S_IWUSR|S_IRGRP) != 0) {
+    return -1;
+  }
+  fclose(stdin);
+  fflush(LOGFILE);
+  if (LOGFILE != stdout) {
+    fclose(stdout);
+  }
+  fclose(stderr);
+  chdir(primary_job_dir);
+  execvp(args[0], args);
+  fprintf(LOGFILE, "Failure to exec job initialization process - %s\n",
+	  strerror(errno));
+  return -1;
+}
+
+/*
+ * Function used to launch a task as the provided user. It does the following :
+ * 1) Creates attempt work dir and log dir to be accessible by the child
+ * 2) Copies the script file from the TT to the work directory
+ * 3) Sets up the environment
+ * 4) Does an execlp on the script to replace the current process image
+ *    with the task image.
+ */
+int run_task_as_user(const char *user, const char *job_id, 
+                     const char *task_id, const char *work_dir,
+                     const char *script_name) {
+  int exit_code = -1;
+  char *task_script_path = NULL;
+  if (create_attempt_directories(user, job_id, task_id) != 0) {
+    goto cleanup;
+  }
+  int task_file_source = open_file_as_task_tracker(script_name);
+  if (task_file_source == -1) {
+    goto cleanup;
+  }
+  task_script_path = get_task_launcher_file(work_dir);
+  if (task_script_path == NULL) {
+    exit_code = OUT_OF_MEMORY;
+    goto cleanup;
+  }
+  if (copy_file(task_file_source, script_name,task_script_path,S_IRWXU) != 0) {
+    goto cleanup;
+  }
+
+  //change the user
+  fcloseall();
+  umask(0027);
+  if (chdir(work_dir) != 0) {
+    fprintf(LOGFILE, "Can't change directory to %s - %s\n", work_dir,
+	    strerror(errno));
+    goto cleanup;
+  }
+  if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+    exit_code = SETUID_OPER_FAILED;
+    goto cleanup;
+  }
+
+  if (execlp(task_script_path, task_script_path, NULL) != 0) {
+    fprintf(LOGFILE, "Couldn't execute the task jvm file %s - %s", 
+            task_script_path, strerror(errno));
+    exit_code = UNABLE_TO_EXECUTE_TASK_SCRIPT;
+    goto cleanup;
+  }
+  exit_code = 0;
+
+ cleanup:
+  free(task_script_path);
+  return exit_code;
+}
+
+/**
+ * Function used to signal a task launched by the user.
+ * The function sends appropriate signal to the process group
+ * specified by the task_pid.
+ */
+int signal_user_task(const char *user, int pid, int sig) {
+  if(pid <= 0) {
+    return INVALID_TASK_PID;
+  }
+
+  if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+    return SETUID_OPER_FAILED;
+  }
+
+  //Don't continue if the process-group is not alive anymore.
+  int has_group = 1;
+  if (kill(-pid,0) < 0) {
+    if (kill(pid, 0) < 0) {
+      if (errno == ESRCH) {
+        return INVALID_TASK_PID;
+      }
+      fprintf(LOGFILE, "Error signalling task %d with %d - %s\n",
+	      pid, sig, strerror(errno));
+      return -1;
+    } else {
+      has_group = 0;
+    }
+  }
+
+  if (kill((has_group ? -1 : 1) * pid, sig) < 0) {
+    if(errno != ESRCH) {
+      fprintf(LOGFILE, 
+              "Error signalling process group %d with signal %d - %s\n", 
+              -pid, sig, strerror(errno));
+      return UNABLE_TO_KILL_TASK;
+    } else {
+      return INVALID_TASK_PID;
+    }
+  }
+  fprintf(LOGFILE, "Killing process %s%d with %d\n",
+	  (has_group ? "group " :""), pid, sig);
+  return 0;
+}
+
+/**
+ * Delete the top-level directory as the task tracker user.
+ */
+static int rmdir_as_tasktracker(const char* path) {
+  int user_uid = geteuid();
+  int user_gid = getegid();
+  int ret = change_effective_user(tt_uid, tt_gid);
+  if (ret == 0) {
+    if (rmdir(path) != 0) {
+      fprintf(LOGFILE, "rmdir of %s failed - %s\n", path, strerror(errno));
+      ret = -1;
+    }
+  }
+  // always change back
+  if (change_effective_user(user_uid, user_gid) != 0) {
+    ret = -1;
+  }
+  return ret;
+}
+
+/**
+ * Recursively delete the given path.
+ * full_path : the path to delete
+ * needs_tt_user: the top level directory must be deleted by the tt user.
+ */
+static int delete_path(const char *full_path, 
+                       int needs_tt_user) {
+  int exit_code = 0;
+
+  if (full_path == NULL) {
+    fprintf(LOGFILE, "Path is null\n");
+    exit_code = UNABLE_TO_BUILD_PATH; // may be malloc failed
+  } else {
+    char *(paths[]) = {strdup(full_path), 0};
+    if (paths[0] == NULL) {
+      fprintf(LOGFILE, "Malloc failed in delete_path\n");
+      return -1;
+    }
+    // check to make sure the directory exists
+    if (access(full_path, F_OK) != 0) {
+      if (errno == ENOENT) {
+	free(paths[0]);
+	return 0;
+      }
+    }
+    FTS* tree = fts_open(paths, FTS_PHYSICAL | FTS_XDEV, NULL);
+    FTSENT* entry = NULL;
+    int ret = 0;
+
+    if (tree == NULL) {
+      fprintf(LOGFILE,
+              "Cannot open file traversal structure for the path %s:%s.\n", 
+              full_path, strerror(errno));
+      free(paths[0]);
+      return -1;
+    }
+    while (((entry = fts_read(tree)) != NULL) && exit_code == 0) {
+      switch (entry->fts_info) {
+
+      case FTS_DP:        // A directory being visited in post-order
+	if (!needs_tt_user ||
+            strcmp(entry->fts_path, full_path) != 0) {
+	  if (rmdir(entry->fts_accpath) != 0) {
+	    fprintf(LOGFILE, "Couldn't delete directory %s - %s\n", 
+		    entry->fts_path, strerror(errno));
+	    exit_code = -1;
+	  }
+	}
+        break;
+
+      case FTS_F:         // A regular file
+      case FTS_SL:        // A symbolic link
+      case FTS_SLNONE:    // A broken symbolic link
+      case FTS_DEFAULT:   // Unknown type of file
+        if (unlink(entry->fts_accpath) != 0) {
+          fprintf(LOGFILE, "Couldn't delete file %s - %s\n", entry->fts_path,
+                  strerror(errno));
+	  exit_code = -1;
+        }
+        break;
+
+      case FTS_DNR:       // Unreadable directory
+        fprintf(LOGFILE, "Unreadable directory %s. Skipping.\n", 
+                entry->fts_path);
+        break;
+
+      case FTS_D:         // A directory in pre-order
+        // if the directory isn't writable by its owner, chmod it so that
+        // its children can be removed
+        if ((entry->fts_statp->st_mode & 0200) == 0) {
+          fprintf(LOGFILE, "Non-writable directory %s, chmoding.\n", 
+                  entry->fts_path);
+          if (chmod(entry->fts_accpath, 0700) != 0) {
+            fprintf(LOGFILE, "Error chmoding %s - %s, continuing\n", 
+                    entry->fts_path, strerror(errno));
+          }
+	}
+        break;
+
+      case FTS_NS:        // A file with no stat(2) information
+        // usually a root directory that doesn't exist
+        fprintf(LOGFILE, "Directory not found %s\n", entry->fts_path);
+	break;
+
+      case FTS_DC:        // A directory that causes a cycle
+      case FTS_DOT:       // A dot directory
+      case FTS_NSOK:      // No stat information requested
+        break;
+
+      case FTS_ERR:       // Error return
+        fprintf(LOGFILE, "Error traversing directory %s - %s\n", 
+                entry->fts_path, strerror(entry->fts_errno));
+        exit_code = -1;
+        break;
+      default:
+        exit_code = -1;
+        break;
+      }
+    }
+    ret = fts_close(tree);
+    if (exit_code == 0 && ret != 0) {
+      fprintf(LOGFILE, "Error in fts_close while deleting %s\n", full_path);
+      exit_code = -1;
+    }
+    if (needs_tt_user) {
+      // The traversal above skipped the top level directory, so remove it
+      // now as the task tracker user. That handles the case where it sits
+      // in a directory owned by the task tracker.
+      exit_code = rmdir_as_tasktracker(full_path);
+    }
+    free(paths[0]);
+  }
+  return exit_code;
+}
+
+/**
+ * Delete the given directory as the user from each of the tt_root directories
+ * user: the user doing the delete
+ * subdir: the subdir to delete
+ */
+int delete_as_user(const char *user,
+                   const char *subdir) {
+  int ret = 0;
+
+  char** tt_roots = get_values(TT_SYS_DIR_KEY);
+  char** ptr;
+  if (tt_roots == NULL || *tt_roots == NULL) {
+    fprintf(LOGFILE, "No %s defined in the configuration\n", TT_SYS_DIR_KEY);
+    return INVALID_CONFIG_FILE;
+  }
+
+  // do the delete
+  for(ptr = tt_roots; *ptr != NULL; ++ptr) {
+    char* full_path = get_user_subdirectory(*ptr, user, subdir);
+    if (full_path == NULL) {
+      return -1;
+    }
+    int this_ret = delete_path(full_path, strlen(subdir) == 0);
+    free(full_path);
+    // delete as much as we can, but remember the error
+    if (this_ret != 0) {
+      ret = this_ret;
+    }
+  }
+  free_values(tt_roots);
+  return ret;
+}
+
+/**
+ * delete a given log directory
+ */
+int delete_log_directory(const char *subdir) {
+  char* log_subdir = get_job_log_directory(subdir);
+  int ret = -1;
+  if (log_subdir != NULL) {
+    ret = delete_path(log_subdir, strchr(subdir, '/') == NULL);
+  }
+  free(log_subdir);
+  return ret;
+}
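
Of the helpers above, mkdirs() is the workhorse: it walks the path one component at a time with openat()/mkdirat() against a directory descriptor it keeps open, so each component is created relative to the directory actually opened in the previous step. A caller sketch (the path is illustrative; the mode matches create_attempt_directories above):

    #include <sys/stat.h>
    #include "task-controller.h"

    int make_job_dir_example(void) {
      const mode_t perms = S_IRWXU | S_IRGRP | S_IXGRP;  /* 0750 */
      /* every missing component is created with perms; failures are
         logged to LOGFILE by mkdirs itself */
      return mkdirs("/disk1/mapred/local/taskTracker/alice/jobcache/job_1",
                    perms);
    }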

+ 149 - 0
src/c++/task-controller/impl/task-controller.h

@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <pwd.h>
+#include <stdio.h>
+#include <sys/types.h>
+
+//command definitions
+enum command {
+  INITIALIZE_JOB = 0,
+  LAUNCH_TASK_JVM = 1,
+  SIGNAL_TASK = 2,
+  DELETE_AS_USER = 3,
+  DELETE_LOG_AS_USER = 4
+};
+
+enum errorcodes {
+  INVALID_ARGUMENT_NUMBER = 1,
+  INVALID_USER_NAME, //2
+  INVALID_COMMAND_PROVIDED, //3
+  SUPER_USER_NOT_ALLOWED_TO_RUN_TASKS, //4
+  INVALID_TT_ROOT, //5
+  SETUID_OPER_FAILED, //6
+  UNABLE_TO_EXECUTE_TASK_SCRIPT, //7
+  UNABLE_TO_KILL_TASK, //8
+  INVALID_TASK_PID, //9
+  ERROR_RESOLVING_FILE_PATH, //10
+  RELATIVE_PATH_COMPONENTS_IN_FILE_PATH, //11
+  UNABLE_TO_STAT_FILE, //12
+  FILE_NOT_OWNED_BY_TASKTRACKER, //13
+  PREPARE_ATTEMPT_DIRECTORIES_FAILED, //14
+  INITIALIZE_JOB_FAILED, //15
+  PREPARE_TASK_LOGS_FAILED, //16
+  INVALID_TT_LOG_DIR, //17
+  OUT_OF_MEMORY, //18
+  INITIALIZE_DISTCACHEFILE_FAILED, //19
+  INITIALIZE_USER_FAILED, //20
+  UNABLE_TO_BUILD_PATH, //21
+  INVALID_TASKCONTROLLER_PERMISSIONS, //22
+  PREPARE_JOB_LOGS_FAILED, //23
+  INVALID_CONFIG_FILE, // 24
+};
+
+#define TT_GROUP_KEY "mapreduce.tasktracker.group"
+
+extern struct passwd *user_detail;
+
+// the log file for error messages
+extern FILE *LOGFILE;
+
+// get the executable's filename
+char* get_executable();
+
+int check_taskcontroller_permissions(char *executable_file);
+
+/**
+ * delete a given log directory as a user
+ */
+int delete_log_directory(const char *log_dir);
+
+// initialize the job directory
+int initialize_job(const char *user, const char *jobid,
+                   const char *credentials, 
+                   const char *job_xml, char* const* args);
+
+// run the task as the user
+int run_task_as_user(const char * user, const char *jobid, const char *taskid,
+                     const char *work_dir, const char *script_name);
+
+// send a signal as the user
+int signal_user_task(const char *user, int pid, int sig);
+
+// delete a directory (or file) recursively as the user.
+int delete_as_user(const char *user,
+                   const char *dir_to_be_deleted);
+
+// set the task tracker's uid and gid
+void set_tasktracker_uid(uid_t user, gid_t group);
+
+/**
+ * Is the user a real user account?
+ * Checks:
+ *   1. Not root
+ *   2. UID is above the minimum configured.
+ *   3. Not in banned user list
+ * Returns NULL on failure
+ */
+struct passwd* check_user(const char *user);
+
+// set the user
+int set_user(const char *user);
+
+// methods to get the directories
+
+char *get_user_directory(const char *tt_root, const char *user);
+
+char *get_job_directory(const char * tt_root, const char *user,
+                        const char *jobid);
+
+char *get_attempt_work_directory(const char *tt_root, const char *user,
+				 const char *job_dir, const char *attempt_id);
+
+char *get_task_launcher_file(const char* work_dir);
+
+/**
+ * Get the job log directory.
+ * Ensures that the result is a realpath and that it is underneath the 
+ * tt log root.
+ */
+char* get_job_log_directory(const char* jobid);
+
+char *get_task_log_dir(const char *log_dir, const char *job_id, 
+                       const char *attempt_id);
+
+/**
+ * Ensure that the given path and all of the parent directories are created
+ * with the desired permissions.
+ */
+int mkdirs(const char* path, mode_t perm);
+
+/**
+ * Function to initialize the user directories of a user.
+ */
+int initialize_user(const char *user);
+
+/**
+ * Create a top level directory for the user.
+ * It assumes that the parent directory is *not* writable by the user.
+ * It creates directories with 02750 permissions owned by the user
+ * and with the group set to the task tracker group.
+ * return non-0 on failure
+ */
+int create_directory_for_user(const char* path);
+
+int change_user(uid_t user, gid_t group);
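
A minimal sketch of the call order this header implies, mirroring impl/main.c above (the user name, group id and paths are illustrative, and LOGFILE must already point at an open stream):

    #include <unistd.h>
    #include "configuration.h"
    #include "task-controller.h"

    int launch_example(gid_t tasktracker_gid) {
      read_config("/etc/hadoop/taskcontroller.cfg");   /* from configuration.h */
      set_tasktracker_uid(getuid(), tasktracker_gid);  /* remember the caller */
      if (set_user("alice") != 0) {                    /* check_user + drop euid */
        return -1;
      }
      /* on success this execs the task script and never returns */
      return run_task_as_user("alice", "job_1", "attempt_1",
                              "/path/to/workdir", "/path/to/taskjvm.sh");
    }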

+ 0 - 242
src/c++/task-controller/main.c

@@ -1,242 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "task-controller.h"
-
-void open_log_file(const char *log_file) {
-  if (log_file == NULL) {
-    LOGFILE = stdout;
-  } else {
-    LOGFILE = fopen(log_file, "a");
-    if (LOGFILE == NULL) {
-      fprintf(stdout, "Unable to open LOGFILE : %s \n", log_file);
-      LOGFILE = stdout;
-    }
-    if (LOGFILE != stdout) {
-      if (chmod(log_file, S_IREAD | S_IEXEC | S_IWRITE | S_IROTH | S_IWOTH
-          | S_IRGRP | S_IWGRP) < 0) {
-        fprintf(stdout, "Unable to change permission of the log file %s \n",
-            log_file);
-        fclose(LOGFILE);
-        fprintf(stdout, "changing log file to stdout");
-        LOGFILE = stdout;
-      }
-    }
-  }
-}
-
-void display_usage(FILE *stream) {
-  fprintf(stream,
-      "Usage: task-controller [-l logfile] user command command-args\n");
-}
-
-/**
- * Check the permissions on taskcontroller to make sure that security is
- * promisable. For this, we need task-controller binary to
- *    * be user-owned by root
- *    * be group-owned by a configured special group.
- *    * others do not have any permissions
- *    * be setuid/setgid
- */
-int check_taskcontroller_permissions(char *executable_file) {
-
-  errno = 0;
-  char * resolved_path = (char *) canonicalize_file_name(executable_file);
-  if (resolved_path == NULL) {
-    fprintf(LOGFILE,
-        "Error resolving the canonical name for the executable : %s!",
-        strerror(errno));
-    return -1;
-  }
-
-  struct stat filestat;
-  errno = 0;
-  if (stat(resolved_path, &filestat) != 0) {
-    fprintf(LOGFILE, "Could not stat the executable : %s!.\n", strerror(errno));
-    return -1;
-  }
-
-  uid_t binary_euid = filestat.st_uid; // Binary's user owner
-  gid_t binary_egid = filestat.st_gid; // Binary's group owner
-
-  // Effective uid should be root
-  if (binary_euid != 0) {
-    fprintf(LOGFILE,
-        "The task-controller binary should be user-owned by root.\n");
-    return -1;
-  }
-
-  // Get the group entry for the special_group
-  errno = 0;
-  struct group *special_group_entry = getgrgid(binary_egid);
-  if (special_group_entry == NULL) {
-    fprintf(LOGFILE,
-      "Unable to get information for effective group of the binary : %s\n",
-      strerror(errno));
-    return -1;
-  }
-
-  char * binary_group = special_group_entry->gr_name;
-  // verify that the group name of the special group
-  // is same as the one in configuration
-  if (check_variable_against_config(TT_GROUP_KEY, binary_group) != 0) {
-    fprintf(LOGFILE,
-      "Group of the binary does not match with that in configuration\n");
-    return -1;
-  }
-
-  // check others do not have read/write/execute permissions
-  if ((filestat.st_mode & S_IROTH) == S_IROTH || (filestat.st_mode & S_IWOTH)
-      == S_IWOTH || (filestat.st_mode & S_IXOTH) == S_IXOTH) {
-    fprintf(LOGFILE,
-      "The task-controller binary should not have read or write or execute for others.\n");
-    return -1;
-  }
-
-  // Binary should be setuid/setgid executable
-  if ((filestat.st_mode & S_ISUID) != S_ISUID || (filestat.st_mode & S_ISGID)
-      != S_ISGID) {
-    fprintf(LOGFILE,
-        "The task-controller binary should be set setuid and setgid bits.\n");
-    return -1;
-  }
-
-  return 0;
-}
-
-int main(int argc, char **argv) {
-  int command;
-  int next_option = 0;
-  const char * job_id = NULL;
-  const char * task_id = NULL;
-  const char * tt_root = NULL;
-  const char *log_dir = NULL;
-  const char * unique_string = NULL;
-  int exit_code = 0;
-  const char * task_pid = NULL;
-  const char* const short_options = "l:";
-  const struct option long_options[] = { { "log", 1, NULL, 'l' }, { NULL, 0,
-      NULL, 0 } };
-
-  const char* log_file = NULL;
-  char * dir_to_be_deleted = NULL;
-
-  char *executable_file = argv[0];
-#ifndef HADOOP_CONF_DIR
-  hadoop_conf_dir = (char *) malloc (sizeof(char) *
-      (strlen(executable_file) - strlen(EXEC_PATTERN)) + 1);
-  strncpy(hadoop_conf_dir,executable_file,
-    (strlen(executable_file) - strlen(EXEC_PATTERN)));
-  hadoop_conf_dir[(strlen(executable_file) - strlen(EXEC_PATTERN))] = '\0';
-#endif
-  do {
-    next_option = getopt_long(argc, argv, short_options, long_options, NULL);
-    switch (next_option) {
-    case 'l':
-      log_file = optarg;
-    default:
-      break;
-    }
-  } while (next_option != -1);
-
-  open_log_file(log_file);
-
-  if (check_taskcontroller_permissions(executable_file) != 0) {
-    fprintf(LOGFILE, "Invalid permissions on task-controller binary.\n");
-    return INVALID_TASKCONTROLLER_PERMISSIONS;
-  }
-
-  //Minimum number of arguments required to run the task-controller
-  //command-name user command tt-root
-  if (argc < 3) {
-    display_usage(stdout);
-    return INVALID_ARGUMENT_NUMBER;
-  }
-
-  //checks done for user name
-  //checks done if the user is root or not.
-  if (argv[optind] == NULL) {
-    fprintf(LOGFILE, "Invalid user name \n");
-    return INVALID_USER_NAME;
-  }
-  if (get_user_details(argv[optind]) != 0) {
-    return INVALID_USER_NAME;
-  }
-  //implicit conversion to int instead of __gid_t and __uid_t
-  if (user_detail->pw_gid == 0 || user_detail->pw_uid == 0) {
-    fprintf(LOGFILE, "Cannot run tasks as super user\n");
-    return SUPER_USER_NOT_ALLOWED_TO_RUN_TASKS;
-  }
-  optind = optind + 1;
-  command = atoi(argv[optind++]);
-
-  fprintf(LOGFILE, "main : command provided %d\n",command);
-  fprintf(LOGFILE, "main : user is %s\n", user_detail->pw_name);
-
-  switch (command) {
-  case INITIALIZE_USER:
-    exit_code = initialize_user(user_detail->pw_name);
-    break;
-  case INITIALIZE_JOB:
-    job_id = argv[optind++];
-    exit_code = initialize_job(job_id, user_detail->pw_name);
-    break;
-  case INITIALIZE_DISTRIBUTEDCACHE_FILE:
-    tt_root = argv[optind++];
-    unique_string = argv[optind++];
-    exit_code = initialize_distributed_cache_file(tt_root, unique_string,
-        user_detail->pw_name);
-    break;
-  case LAUNCH_TASK_JVM:
-    tt_root = argv[optind++];
-    job_id = argv[optind++];
-    task_id = argv[optind++];
-    exit_code
-        = run_task_as_user(user_detail->pw_name, job_id, task_id, tt_root);
-    break;
-  case INITIALIZE_TASK:
-    job_id = argv[optind++];
-    task_id = argv[optind++];
-    exit_code = initialize_task(job_id, task_id, user_detail->pw_name);
-    break;
-  case TERMINATE_TASK_JVM:
-    task_pid = argv[optind++];
-    exit_code = kill_user_task(user_detail->pw_name, task_pid, SIGTERM);
-    break;
-  case KILL_TASK_JVM:
-    task_pid = argv[optind++];
-    exit_code = kill_user_task(user_detail->pw_name, task_pid, SIGKILL);
-    break;
-  case ENABLE_TASK_FOR_CLEANUP:
-    tt_root = argv[optind++];
-    job_id = argv[optind++];
-    dir_to_be_deleted = argv[optind++];
-    exit_code = enable_task_for_cleanup(tt_root, user_detail->pw_name, job_id,
-                                        dir_to_be_deleted);
-    break;
-  case ENABLE_JOB_FOR_CLEANUP:
-    tt_root = argv[optind++];
-    job_id = argv[optind++];
-    exit_code = enable_job_for_cleanup(tt_root, user_detail->pw_name, job_id);
-    break;
-  default:
-    exit_code = INVALID_COMMAND_PROVIDED;
-  }
-  fflush(LOGFILE);
-  fclose(LOGFILE);
-  return exit_code;
-}

+ 0 - 1270
src/c++/task-controller/task-controller.c

@@ -1,1270 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "task-controller.h"
-
-//struct to store the user details
-struct passwd *user_detail = NULL;
-
-//LOGFILE
-FILE *LOGFILE;
-
-//placeholder for global cleanup operations
-void cleanup() {
-  free_configurations();
-}
-
-//change the user to passed user for executing/killing tasks
-int change_user(const char * user) {
-  if (get_user_details(user) < 0) {
-    return -1;
-  }
-
-  if(initgroups(user_detail->pw_name, user_detail->pw_gid) != 0) {
-	  cleanup();
-	  return SETUID_OPER_FAILED;
-  }
-
-  errno = 0;
-
-  setgid(user_detail->pw_gid);
-  if (errno != 0) {
-    fprintf(LOGFILE, "unable to setgid : %s\n", strerror(errno));
-    cleanup();
-    return SETUID_OPER_FAILED;
-  }
-
-  setegid(user_detail->pw_gid);
-  if (errno != 0) {
-    fprintf(LOGFILE, "unable to setegid : %s\n", strerror(errno));
-    cleanup();
-    return SETUID_OPER_FAILED;
-  }
-
-  setuid(user_detail->pw_uid);
-  if (errno != 0) {
-    fprintf(LOGFILE, "unable to setuid : %s\n", strerror(errno));
-    cleanup();
-    return SETUID_OPER_FAILED;
-  }
-
-  seteuid(user_detail->pw_uid);
-  if (errno != 0) {
-    fprintf(LOGFILE, "unable to seteuid : %s\n", strerror(errno));
-    cleanup();
-    return SETUID_OPER_FAILED;
-  }
-  return 0;
-}
-
-/**
- * Checks the passed value for the variable config_key against the values in
- * the configuration.
- * Returns 0 if the passed value is found in the configuration,
- *        -1 otherwise
- */
-int check_variable_against_config(const char *config_key,
-    const char *passed_value) {
-
-  if (config_key == NULL || passed_value == NULL) {
-    return -1;
-  }
-
-  int found = -1;
-
-  const char **config_value = get_values(config_key);
-
-  if (config_value == NULL) {
-    fprintf(LOGFILE, "%s is not configured.\n", config_key);
-    return -1;
-  }
-
-  char *full_config_value = (char *)get_value(config_key);
-
-  char **config_val_ptr = (char **) config_value;
-  while (*config_val_ptr != NULL) {
-    if (strcmp(*config_val_ptr, passed_value) == 0) {
-      found = 0;
-      break;
-    }
-    config_val_ptr++;
-  }
-
-  if (found != 0) {
-    fprintf(
-        LOGFILE,
-        "Invalid value passed: \
-        Configured value of %s is %s. \
-        Passed value is %s.\n",
-        config_key, full_config_value, passed_value);
-  }
-  free(full_config_value);
-  free(config_value);
-  return found;
-}
-
-/**
- * Utility function to concatenate argB to argA using the concat_pattern
- */
-char *concatenate(char *concat_pattern, char *return_path_name, int numArgs,
-    ...) {
-  va_list ap;
-  va_start(ap, numArgs);
-  int strlen_args = 0;
-  char *arg = NULL;
-  int j;
-  for (j = 0; j < numArgs; j++) {
-    arg = va_arg(ap, char*);
-    if (arg == NULL) {
-      fprintf(LOGFILE, "One of the arguments passed for %s in null.\n",
-          return_path_name);
-      return NULL;
-    }
-    strlen_args += strlen(arg);
-  }
-  va_end(ap);
-
-  char *return_path = NULL;
-  int str_len = strlen(concat_pattern) + strlen_args;
-
-  return_path = (char *) malloc(sizeof(char) * (str_len + 1));
-  if (return_path == NULL) {
-    fprintf(LOGFILE, "Unable to allocate memory for %s.\n", return_path_name);
-    return NULL;
-  }
-  memset(return_path, '\0', str_len + 1);
-  va_start(ap, numArgs);
-  vsnprintf(return_path, str_len, concat_pattern, ap);
-  va_end(ap);
-  return return_path;
-}
-
-/**
- * Get the job-directory path from tt_root, user name and job-id
- */
-char *get_job_directory(const char * tt_root, const char *user,
-    const char *jobid) {
-  return concatenate(TT_JOB_DIR_PATTERN, "job_dir_path", 3, tt_root, user,
-      jobid);
-}
-
-/**
- * Get the user directory of a particular user
- */
-char *get_user_directory(const char *tt_root, const char *user) {
-  return concatenate(USER_DIR_PATTERN, "user_dir_path", 2, tt_root, user);
-}
-
-/**
- * Get the distributed cache directory for a particular user
- */
-char *get_distributed_cache_directory(const char *tt_root, const char *user,
-    const char* unique_string) {
-  return concatenate(USER_DISTRIBUTED_CACHE_DIR_PATTERN, 
-      "dist_cache_unique_path", 3, tt_root, user, unique_string);
-}
-
-char *get_job_work_directory(const char *job_dir) {
-  return concatenate(JOB_DIR_TO_JOB_WORK_PATTERN, "job_work_dir_path", 2,
-      job_dir, "");
-}
-/**
- * Get the attempt directory for the given attempt_id
- */
-char *get_attempt_directory(const char *job_dir, const char *attempt_id) {
-  return concatenate(JOB_DIR_TO_ATTEMPT_DIR_PATTERN, "attempt_dir_path", 2,
-      job_dir, attempt_id);
-}
-
-/*
- * Get the path to the task launcher file which is created by the TT
- */
-char *get_task_launcher_file(const char *job_dir, const char *attempt_dir) {
-  return concatenate(TASK_SCRIPT_PATTERN, "task_script_path", 2, job_dir,
-      attempt_dir);
-}
-
-/*
- * Builds the full path of the dir(localTaskDir or localWorkDir)
- * tt_root : is the base path(i.e. mapred-local-dir) sent to task-controller
- * dir_to_be_deleted : is either taskDir($taskId) OR taskWorkDir($taskId/work)
- */
-char *get_task_dir_path(const char *tt_root, const char *user,
-                        const char *jobid, const char *dir_to_be_deleted) {
-  return concatenate(TT_LOCAL_TASK_DIR_PATTERN, "task_dir_full_path", 4,
-                     tt_root, user, jobid, dir_to_be_deleted);
-}
-
-/**
- * Get the log directory for the given attempt.
- */
-char *get_task_log_dir(const char *log_dir, const char *job_id, 
-    const char *attempt_id) {
-  return concatenate(ATTEMPT_LOG_DIR_PATTERN, "task_log_dir", 3, log_dir,
-      job_id, attempt_id);
-}
-
-/**
- * Get the log directory for the given job.
- */
-char *get_job_log_dir(const char *log_dir, const char *job_id) {
-  return concatenate(JOB_LOG_DIR_PATTERN, "job_log_dir", 2, log_dir, job_id);
-}
-
-/**
- * Get the job ACLs file for the given job log dir.
- */
-char *get_job_acls_file(const char *log_dir) {
-  return concatenate(JOB_LOG_DIR_TO_JOB_ACLS_FILE_PATTERN, "job_acls_file",
-                     1, log_dir);
-}
-
-/**
- * Function to check if the passed tt_root is present in mapred.local.dir
- * the task-controller is configured with.
- */
-int check_tt_root(const char *tt_root) {
-  return check_variable_against_config(TT_SYS_DIR_KEY, tt_root);
-}
-
-/**
- * Function to check if the constructed path and absolute path of the task
- * launcher file resolve to one and same. This is done so as to avoid
- * security pitfalls because of relative path components in the file name.
- */
-int check_path_for_relative_components(char *path) {
-  char * resolved_path = (char *) canonicalize_file_name(path);
-  if (resolved_path == NULL) {
-    fprintf(LOGFILE,
-        "Error resolving the path: %s. Passed path: %s\n",
-        strerror(errno), path);
-    return ERROR_RESOLVING_FILE_PATH;
-  }
-  if (strcmp(resolved_path, path) != 0) {
-    fprintf(LOGFILE,
-        "Relative path components in the path: %s. Resolved path: %s\n",
-        path, resolved_path);
-    free(resolved_path);
-    return RELATIVE_PATH_COMPONENTS_IN_FILE_PATH;
-  }
-  free(resolved_path);
-  return 0;
-}
-
-/**
- * Function to change the owner/group of a given path.
- */
-static int change_owner(const char *path, uid_t uid, gid_t gid) {
-  int exit_code = chown(path, uid, gid);
-  if (exit_code != 0) {
-    fprintf(LOGFILE, "chown %d:%d for path %s failed: %s.\n", uid, gid, path,
-        strerror(errno));
-  }
-  return exit_code;
-}
-
-/**
- * Function to change the mode of a given path.
- */
-static int change_mode(const char *path, mode_t mode) {
-  int exit_code = chmod(path, mode);
-  if (exit_code != 0) {
-    fprintf(LOGFILE, "chmod %d of path %s failed: %s.\n", mode, path,
-        strerror(errno));
-  }
-  return exit_code;
-}
-
-/**
- * Function to change permissions of the given path. It does the following
- * recursively:
- *    1) changes the owner/group of the paths to the passed owner/group
- *    2) changes the file permission to the passed file_mode and directory
- *       permission to the passed dir_mode
- *
- * should_check_ownership : boolean to enable checking of ownership of each path
- */
-static int secure_path(const char *path, uid_t uid, gid_t gid,
-    mode_t file_mode, mode_t dir_mode, int should_check_ownership) {
-  FTS *tree = NULL; // the file hierarchy
-  FTSENT *entry = NULL; // a file in the hierarchy
-  char *paths[] = { (char *) path, NULL };//array needs to be NULL-terminated
-  int process_path = 0;
-  int dir = 0;
-  int error_code = 0;
-  int done = 0;
-
-  // Get physical locations and don't resolve the symlinks.
-  // Don't change directory while walking the directory.
-  int ftsoptions = FTS_PHYSICAL | FTS_NOCHDIR;
-
-  tree = fts_open(paths, ftsoptions, NULL);
-  if (tree == NULL) {
-    fprintf(LOGFILE,
-        "Cannot open file traversal structure for the path %s:%s.\n", path,
-        strerror(errno));
-    return -1;
-  }
-
-  while (((entry = fts_read(tree)) != NULL) && !done) {
-    dir = 0;
-    switch (entry->fts_info) {
-    case FTS_D:
-      // A directory being visited in pre-order.
-      // We change ownership of directories in post-order.
-      // so ignore the pre-order visit.
-      process_path = 0;
-      break;
-    case FTS_DC:
-      // A directory that causes a cycle in the tree
-      // We don't expect cycles, ignore.
-      process_path = 0;
-      break;
-    case FTS_DNR:
-      // A directory which cannot be read
-      // Ignore and set error code.
-      process_path = 0;
-      error_code = -1;
-      break;
-    case FTS_DOT:
-      // "."  or ".."
-      process_path = 0;
-      break;
-    case FTS_F:
-      // A regular file
-      process_path = 1;
-      break;
-    case FTS_DP:
-      // A directory being visited in post-order
-      if (entry->fts_level == 0) {
-        // root directory. Done with traversing.
-        done = 1;
-      }
-      process_path = 1;
-      dir = 1;
-      break;
-    case FTS_SL:
-      // A symbolic link
-      // We don't want to change-ownership(and set-permissions) for the file/dir
-      // pointed to by any symlink.
-      process_path = 0;
-      break;
-    case FTS_SLNONE:
-      // A symbolic link with a nonexistent target
-      process_path = 0;
-      break;
-    case FTS_NS:
-      // A  file for which no stat(2) information was available
-      // Ignore and set error code
-      process_path = 0;
-      error_code = -1;
-      break;
-    case FTS_ERR:
-      // An error return. Ignore and set error code.
-      process_path = 0;
-      error_code = -1;
-      break;
-    case FTS_DEFAULT:
-      // File that doesn't belong to any of the above type. Ignore.
-      process_path = 0;
-      break;
-    default:
-      // None of the above. Ignore and set error code
-      process_path = 0;
-      error_code = -1;
-    }
-
-    if (error_code != 0) {
-      break;
-    }
-    if (!process_path) {
-      continue;
-    }
-
-    error_code = secure_single_path(entry->fts_path, uid, gid,
-     (dir ? dir_mode : file_mode), should_check_ownership);
-  }
-  if (fts_close(tree) != 0) {
-    fprintf(LOGFILE, "couldn't close file traversal structure:%s.\n",
-        strerror(errno));
-  }
-  return error_code;
-}
-
-/**
- * Function to change ownership and permissions of the given path. 
- * This call sets ownership and permissions just for the path, not recursive.  
- */
-int secure_single_path(char *path, uid_t uid, gid_t gid,
-    mode_t perm, int should_check_ownership) {
-  int error_code = 0;
-  if (should_check_ownership && 
-      (check_ownership(path, uid, gid) != 0)) {
-    fprintf(LOGFILE,
-      "Invalid file path. %s not user/group owned by the tasktracker.\n", path);
-    error_code = -1;
-  } else if (change_owner(path, uid, gid) != 0) {
-    fprintf(LOGFILE, "couldn't change the ownership of %s\n", path);
-    error_code = -3;
-  } else if (change_mode(path, perm) != 0) {
-    fprintf(LOGFILE, "couldn't change the permissions of %s\n", path);
-    error_code = -3;
-  }
-  return error_code;
-}
-
-/**
- * Function to prepare the attempt directories for the task JVM.
- * This is done by changing the ownership of the attempt directory recursively
- * to the job owner. We do the following:
- *  *  sudo chown user:mapred -R taskTracker/$user/jobcache/$jobid/$attemptid/
- *  *  sudo chmod 2770 -R taskTracker/$user/jobcache/$jobid/$attemptid/
- */
-int prepare_attempt_directories(const char *job_id, const char *attempt_id,
-    const char *user) {
-  if (job_id == NULL || attempt_id == NULL || user == NULL) {
-    fprintf(LOGFILE, "Either attempt_id is null or the user passed is null.\n");
-    return INVALID_ARGUMENT_NUMBER;
-  }
-
-  gid_t tasktracker_gid = getegid(); // the group permissions of the binary.
-
-  if (get_user_details(user) < 0) {
-    fprintf(LOGFILE, "Couldn't get the user details of %s.\n", user);
-    return INVALID_USER_NAME;
-  }
-
-  char **local_dir = (char **) get_values(TT_SYS_DIR_KEY);
-
-  if (local_dir == NULL) {
-    fprintf(LOGFILE, "%s is not configured.\n", TT_SYS_DIR_KEY);
-    cleanup();
-    return PREPARE_ATTEMPT_DIRECTORIES_FAILED;
-  }
-
-  char *full_local_dir_str = (char *) get_value(TT_SYS_DIR_KEY);
-#ifdef DEBUG
-  fprintf(LOGFILE, "Value from config for %s is %s.\n", TT_SYS_DIR_KEY,
-      full_local_dir_str);
-#endif
-
-  char *job_dir;
-  char *attempt_dir;
-  char **local_dir_ptr = local_dir;
-  int failed = 0;
-  while (*local_dir_ptr != NULL) {
-    job_dir = get_job_directory(*local_dir_ptr, user, job_id);
-    if (job_dir == NULL) {
-      fprintf(LOGFILE, "Couldn't get job directory for %s.\n", job_id);
-      failed = 1;
-      break;
-    }
-
-    // prepare attempt-dir in each of the mapred_local_dir
-    attempt_dir = get_attempt_directory(job_dir, attempt_id);
-    if (attempt_dir == NULL) {
-      fprintf(LOGFILE, "Couldn't get attempt directory for %s.\n", attempt_id);
-      failed = 1;
-      free(job_dir);
-      break;
-    }
-
-    struct stat filestat;
-    if (stat(attempt_dir, &filestat) != 0) {
-      if (errno == ENOENT) {
-#ifdef DEBUG
-        fprintf(LOGFILE,
-            "attempt_dir %s doesn't exist. Not doing anything.\n", attempt_dir);
-#endif
-      } else {
-        // stat failed because of something else!
-        fprintf(LOGFILE, "Failed to stat the attempt_dir %s\n", attempt_dir);
-        failed = 1;
-        free(attempt_dir);
-        free(job_dir);
-        break;
-      }
-    } else if (secure_path(attempt_dir, user_detail->pw_uid,
-               tasktracker_gid, S_IRWXU | S_IRWXG, S_ISGID | S_IRWXU | S_IRWXG,
-               1) != 0) {
-      // No setgid on files and setgid on dirs, 770
-      fprintf(LOGFILE, "Failed to secure the attempt_dir %s\n", attempt_dir);
-      failed = 1;
-      free(attempt_dir);
-      free(job_dir);
-      break;
-    }
-
-    local_dir_ptr++;
-    free(attempt_dir);
-    free(job_dir);
-  }
-  free(local_dir);
-  free(full_local_dir_str);
-
-  cleanup();
-  if (failed) {
-    return PREPARE_ATTEMPT_DIRECTORIES_FAILED;
-  }
-  return 0;
-}
-
-/**
- * Function to prepare the job log dir(and job acls file in it) for the child.
- * It gives the user ownership of the job's log-dir to the user and
- * group ownership to the user running tasktracker(i.e. tt_user).
- *
- *   *  sudo chown user:mapred log-dir/userlogs/$jobid
- *   *    if user is not $tt_user,
- *   *      sudo chmod 2570 log-dir/userlogs/$jobid
- *   *    else
- *   *      sudo chmod 2770 log-dir/userlogs/$jobid
- *   *  sudo chown user:mapred log-dir/userlogs/$jobid/job-acls.xml
- *   *    if user is not $tt_user,
- *   *      sudo chmod 2570 log-dir/userlogs/$jobid/job-acls.xml
- *   *    else
- *   *      sudo chmod 2770 log-dir/userlogs/$jobid/job-acls.xml 
- */
-int prepare_job_logs(const char *log_dir, const char *job_id,
-    mode_t permissions) {
-
-  char *job_log_dir = get_job_log_dir(log_dir, job_id);
-  if (job_log_dir == NULL) {
-    fprintf(LOGFILE, "Couldn't get job log directory %s.\n", job_log_dir);
-    return -1;
-  }
-
-  struct stat filestat;
-  if (stat(job_log_dir, &filestat) != 0) {
-    if (errno == ENOENT) {
-#ifdef DEBUG
-      fprintf(LOGFILE, "job_log_dir %s doesn't exist. Not doing anything.\n",
-          job_log_dir);
-#endif
-      free(job_log_dir);
-      return 0;
-    } else {
-      // stat failed because of something else!
-      fprintf(LOGFILE, "Failed to stat the job log dir %s\n", job_log_dir);
-      free(job_log_dir);
-      return -1;
-    }
-  }
-
-  gid_t tasktracker_gid = getegid(); // the group permissions of the binary.
-  // job log directory should not be set permissions recursively
-  // because, on tt restart/reinit, it would contain directories of earlier run
-  if (secure_single_path(job_log_dir, user_detail->pw_uid, tasktracker_gid,
-      S_ISGID | permissions, 1) != 0) {
-    fprintf(LOGFILE, "Failed to secure the log_dir %s\n", job_log_dir);
-    free(job_log_dir);
-    return -1;
-  }
-
-  //set ownership and permissions for job_log_dir/job-acls.xml, if exists.
-  char *job_acls_file = get_job_acls_file(job_log_dir);
-  if (job_acls_file == NULL) {
-    fprintf(LOGFILE, "Couldn't get job acls file %s.\n", job_acls_file);
-    free(job_log_dir);
-    return -1; 
-  }
-
-  struct stat filestat1;
-  if (stat(job_acls_file, &filestat1) != 0) {
-    if (errno == ENOENT) {
-#ifdef DEBUG
-      fprintf(LOGFILE, "job_acls_file %s doesn't exist. Not doing anything.\n",
-          job_acls_file);
-#endif
-      free(job_acls_file);
-      free(job_log_dir);
-      return 0;
-    } else {
-      // stat failed because of something else!
-      fprintf(LOGFILE, "Failed to stat the job_acls_file %s\n", job_acls_file);
-      free(job_acls_file);
-      free(job_log_dir);
-      return -1;
-    }
-  }
-
-  if (secure_single_path(job_acls_file, user_detail->pw_uid, tasktracker_gid,
-      permissions, 1) != 0) {
-    fprintf(LOGFILE, "Failed to secure the job acls file %s\n", job_acls_file);
-    free(job_acls_file);
-    free(job_log_dir);
-    return -1;
-  }
-  free(job_acls_file);
-  free(job_log_dir);
-  return 0;
-}
-
-/**
- * Function to prepare the task logs for the child. It gives the user
- * ownership of the attempt's log-dir to the user and group ownership to the
- * user running tasktracker.
- *     *  sudo chown user:mapred log-dir/userlogs/$jobid/$attemptid
- *     *  sudo chmod -R 2770 log-dir/userlogs/$jobid/$attemptid
- */
-int prepare_task_logs(const char *log_dir, const char *job_id, 
-    const char *task_id) {
-
-  char *task_log_dir = get_task_log_dir(log_dir, job_id, task_id);
-  if (task_log_dir == NULL) {
-    fprintf(LOGFILE, "Couldn't get task_log directory %s.\n", task_log_dir);
-    return -1;
-  }
-
-  struct stat filestat;
-  if (stat(task_log_dir, &filestat) != 0) {
-    if (errno == ENOENT) {
-      // See TaskRunner.java to see that an absent log-dir doesn't fail the task.
-#ifdef DEBUG
-      fprintf(LOGFILE, "task_log_dir %s doesn't exist. Not doing anything.\n",
-          task_log_dir);
-#endif
-      free(task_log_dir);
-      return 0;
-    } else {
-      // stat failed because of something else!
-      fprintf(LOGFILE, "Failed to stat the task_log_dir %s\n", task_log_dir);
-      free(task_log_dir);
-      return -1;
-    }
-  }
-
-  gid_t tasktracker_gid = getegid(); // the group permissions of the binary.
-  if (secure_path(task_log_dir, user_detail->pw_uid, tasktracker_gid,
-      S_IRWXU | S_IRWXG, S_ISGID | S_IRWXU | S_IRWXG, 1) != 0) {
-    // setgid on dirs but not files, 770. As of now, there are no files though
-    fprintf(LOGFILE, "Failed to secure the log_dir %s\n", task_log_dir);
-    free(task_log_dir);
-    return -1;
-  }
-  free(task_log_dir);
-  return 0;
-}
-
-//function used to populate and user_details structure.
-
-int get_user_details(const char *user) {
-  if (user_detail == NULL) {
-    user_detail = getpwnam(user);
-    if (user_detail == NULL) {
-      fprintf(LOGFILE, "Invalid user\n");
-      return -1;
-    }
-  }
-  return 0;
-}
-
-/*
- * Function to check if the TaskTracker actually owns the file.
- * Or it has right ownership already. 
-  */
-int check_ownership(char *path, uid_t uid, gid_t gid) {
-  struct stat filestat;
-  if (stat(path, &filestat) != 0) {
-    return UNABLE_TO_STAT_FILE;
-  }
-  // check user/group. User should be TaskTracker user, group can either be
-  // TaskTracker's primary group or the special group to which binary's
-  // permissions are set.
-  // Or it can be the user/group owned by uid and gid passed. 
-  if ((getuid() != filestat.st_uid || (getgid() != filestat.st_gid && getegid()
-      != filestat.st_gid)) &&
-      ((uid != filestat.st_uid) || (gid != filestat.st_gid))) {
-    return FILE_NOT_OWNED_BY_TASKTRACKER;
-  }
-  return 0;
-}
-
-/**
- * Function to initialize the user directories of a user.
- * It does the following:
- *     *  sudo chown user:mapred -R taskTracker/$user
- *     *  if user is not $tt_user,
- *     *    sudo chmod 2570 -R taskTracker/$user
- *     *  else // user is tt_user
- *     *    sudo chmod 2770 -R taskTracker/$user
- * This is done once per every user on the TaskTracker.
- */
-int initialize_user(const char *user) {
-
-  if (user == NULL) {
-    fprintf(LOGFILE, "user passed is null.\n");
-    return INVALID_ARGUMENT_NUMBER;
-  }
-
-  if (get_user_details(user) < 0) {
-    fprintf(LOGFILE, "Couldn't get the user details of %s", user);
-    return INVALID_USER_NAME;
-  }
-
-  gid_t tasktracker_gid = getegid(); // the group permissions of the binary.
-
-  char **local_dir = (char **) get_values(TT_SYS_DIR_KEY);
-  if (local_dir == NULL) {
-    fprintf(LOGFILE, "%s is not configured.\n", TT_SYS_DIR_KEY);
-    cleanup();
-    return INVALID_TT_ROOT;
-  }
-
-  char *full_local_dir_str = (char *) get_value(TT_SYS_DIR_KEY);
-#ifdef DEBUG
-  fprintf(LOGFILE, "Value from config for %s is %s.\n", TT_SYS_DIR_KEY,
-      full_local_dir_str);
-#endif
-
-  int is_tt_user = (user_detail->pw_uid == getuid());
-  
-  // for tt_user, set 770 permissions; otherwise set 570
-  mode_t permissions = is_tt_user ? (S_IRWXU | S_IRWXG)
-                                  : (S_IRUSR | S_IXUSR | S_IRWXG);
-  char *user_dir;
-  char **local_dir_ptr = local_dir;
-  int failed = 0;
-  while (*local_dir_ptr != NULL) {
-    user_dir = get_user_directory(*local_dir_ptr, user);
-    if (user_dir == NULL) {
-      fprintf(LOGFILE, "Couldn't get userdir directory for %s.\n", user);
-      failed = 1;
-      break;
-    }
-
-    struct stat filestat;
-    if (stat(user_dir, &filestat) != 0) {
-      if (errno == ENOENT) {
-#ifdef DEBUG
-        fprintf(LOGFILE, "user_dir %s doesn't exist. Not doing anything.\n",
-            user_dir);
-#endif
-      } else {
-        // stat failed because of something else!
-        fprintf(LOGFILE, "Failed to stat the user_dir %s\n",
-            user_dir);
-        failed = 1;
-        free(user_dir);
-        break;
-      }
-    } else if (secure_path(user_dir, user_detail->pw_uid,
-        tasktracker_gid, permissions, S_ISGID | permissions, 1) != 0) {
-      // No setgid on files and setgid on dirs,
-      // 770 for tt_user and 570 for any other user
-      fprintf(LOGFILE, "Failed to secure the user_dir %s\n",
-              user_dir);
-      failed = 1;
-      free(user_dir);
-      break;
-    }
-
-    local_dir_ptr++;
-    free(user_dir);
-  }
-  free(local_dir);
-  free(full_local_dir_str);
-  cleanup();
-  if (failed) {
-    return INITIALIZE_USER_FAILED;
-  }
-  return 0;
-}
-
-/**
- * Function to prepare the job directories for the task JVM.
- * We do the following:
- *     *  sudo chown user:mapred -R taskTracker/$user/jobcache/$jobid
- *     *  sudo chown user:mapred -R logs/userlogs/$jobid 
- *     *  if user is not $tt_user,
- *     *    sudo chmod 2570 -R taskTracker/$user/jobcache/$jobid
- *     *    sudo chmod 2570 -R logs/userlogs/$jobid
- *     *  else // user is tt_user
- *     *    sudo chmod 2770 -R taskTracker/$user/jobcache/$jobid
- *     *    sudo chmod 2770 -R logs/userlogs/$jobid
- *     *
- *     *  For any user, sudo chmod 2770 taskTracker/$user/jobcache/$jobid/work
- */
-int initialize_job(const char *jobid, const char *user) {
-  if (jobid == NULL || user == NULL) {
-    fprintf(LOGFILE, "Either jobid is null or the user passed is null.\n");
-    return INVALID_ARGUMENT_NUMBER;
-  }
-
-  if (get_user_details(user) < 0) {
-    fprintf(LOGFILE, "Couldn't get the user details of %s", user);
-    return INVALID_USER_NAME;
-  }
-
-  gid_t tasktracker_gid = getegid(); // the group permissions of the binary.
-
-  char **local_dir = (char **) get_values(TT_SYS_DIR_KEY);
-  if (local_dir == NULL) {
-    fprintf(LOGFILE, "%s is not configured.\n", TT_SYS_DIR_KEY);
-    cleanup();
-    return INVALID_TT_ROOT;
-  }
-
-  char *full_local_dir_str = (char *) get_value(TT_SYS_DIR_KEY);
-#ifdef DEBUG
-  fprintf(LOGFILE, "Value from config for %s is %s.\n", TT_SYS_DIR_KEY,
-      full_local_dir_str);
-#endif
-
-  int is_tt_user = (user_detail->pw_uid == getuid());
-  
-  // for tt_user, set 770 permissions; for any other user, set 570 for job-dir
-  mode_t permissions = is_tt_user ? (S_IRWXU | S_IRWXG)
-                                  : (S_IRUSR | S_IXUSR | S_IRWXG);
-  char *job_dir, *job_work_dir;
-  char **local_dir_ptr = local_dir;
-  int failed = 0;
-  while (*local_dir_ptr != NULL) {
-    job_dir = get_job_directory(*local_dir_ptr, user, jobid);
-    if (job_dir == NULL) {
-      fprintf(LOGFILE, "Couldn't get job directory for %s.\n", jobid);
-      failed = 1;
-      break;
-    }
-
-    struct stat filestat;
-    if (stat(job_dir, &filestat) != 0) {
-      if (errno == ENOENT) {
-#ifdef DEBUG
-        fprintf(LOGFILE, "job_dir %s doesn't exist. Not doing anything.\n",
-            job_dir);
-#endif
-      } else {
-        // stat failed because of something else!
-        fprintf(LOGFILE, "Failed to stat the job_dir %s\n", job_dir);
-        failed = 1;
-        free(job_dir);
-        break;
-      }
-    } else if (secure_path(job_dir, user_detail->pw_uid, tasktracker_gid,
-               permissions, S_ISGID | permissions, 1) != 0) {
-      // No setgid on files and setgid on dirs,
-      // 770 for tt_user and 570 for any other user
-      fprintf(LOGFILE, "Failed to secure the job_dir %s\n", job_dir);
-      failed = 1;
-      free(job_dir);
-      break;
-    } else if (!is_tt_user) {
-      // For tt_user, we don't need this as we already set 2770 for
-      // job-work-dir because of "chmod -R" done above
-      job_work_dir = get_job_work_directory(job_dir);
-      if (job_work_dir == NULL) {
-        fprintf(LOGFILE, "Couldn't get job-work directory for %s.\n", jobid);
-        failed = 1;
-        break;
-      }
-
-      // Set 2770 on the job-work directory
-      if (stat(job_work_dir, &filestat) != 0) {
-        if (errno == ENOENT) {
-#ifdef DEBUG
-          fprintf(LOGFILE,
-              "job_work_dir %s doesn't exist. Not doing anything.\n",
-              job_work_dir);
-#endif
-          free(job_work_dir);
-        } else {
-          // stat failed because of something else!
-          fprintf(LOGFILE, "Failed to stat the job_work_dir %s\n",
-              job_work_dir);
-          failed = 1;
-          free(job_work_dir);
-          free(job_dir);
-          break;
-        }
-      } else if (change_mode(job_work_dir, S_ISGID | S_IRWXU | S_IRWXG) != 0) {
-        fprintf(LOGFILE,
-            "couldn't change the permissions of job_work_dir %s\n",
-            job_work_dir);
-        failed = 1;
-        free(job_work_dir);
-        free(job_dir);
-        break;
-      }
-    }
-
-    local_dir_ptr++;
-    free(job_dir);
-  }
-  free(local_dir);
-  free(full_local_dir_str);
-  int exit_code = 0;
-  if (failed) {
-    exit_code = INITIALIZE_JOB_FAILED;
-    goto cleanup;
-  }
-
-  char *log_dir = (char *) get_value(TT_LOG_DIR_KEY);
-  if (log_dir == NULL) {
-    fprintf(LOGFILE, "Log directory is not configured.\n");
-    exit_code = INVALID_TT_LOG_DIR;
-    goto cleanup;
-  }
-
-  if (prepare_job_logs(log_dir, jobid, permissions) != 0) {
-    fprintf(LOGFILE, "Couldn't prepare job logs directory %s for %s.\n",
-        log_dir, jobid);
-    exit_code = PREPARE_JOB_LOGS_FAILED;
-  }
-
-  cleanup:
-  // free configurations
-  cleanup();
-  if (log_dir != NULL) {
-    free(log_dir);
-  }
-  return exit_code;
-}
-
-/**
- * Function to initialize the distributed cache file for a user.
- * It does the following:
- *     *  sudo chown user:mapred -R taskTracker/$user/distcache/<randomdir>
- *     *  if user is not $tt_user,
- *     *    sudo chmod 2570 -R taskTracker/$user/distcache/<randomdir>
- *     *  else // user is tt_user
- *     *    sudo chmod 2770 -R taskTracker/$user/distcache/<randomdir>
- * This is done once per localization. Tasks reusing JVMs just create
- * symbolic links themselves and so there isn't anything specific to do in
- * that case.
- */
-int initialize_distributed_cache_file(const char *tt_root, 
-    const char *unique_string, const char *user) {
-  if (tt_root == NULL) {
-    fprintf(LOGFILE, "tt_root passed is null.\n");
-    return INVALID_ARGUMENT_NUMBER;
-  }
-  if (unique_string == NULL) {
-    fprintf(LOGFILE, "unique_string passed is null.\n");
-    return INVALID_ARGUMENT_NUMBER;
-  }
- 
-  if (user == NULL) {
-    fprintf(LOGFILE, "user passed is null.\n");
-    return INVALID_ARGUMENT_NUMBER;
-  }
-
-  if (get_user_details(user) < 0) {
-    fprintf(LOGFILE, "Couldn't get the user details of %s", user);
-    return INVALID_USER_NAME;
-  }
-  //Check tt_root
-  if (check_tt_root(tt_root) < 0) {
-    fprintf(LOGFILE, "invalid tt root passed %s\n", tt_root);
-    cleanup();
-    return INVALID_TT_ROOT;
-  }
-
-  // set permission on the unique directory
-  char *localized_unique_dir = get_distributed_cache_directory(tt_root, user,
-      unique_string);
-  if (localized_unique_dir == NULL) {
-    fprintf(LOGFILE, "Couldn't get unique distcache directory for %s.\n", user);
-    cleanup();
-    return INITIALIZE_DISTCACHEFILE_FAILED;
-  }
-
-  gid_t binary_gid = getegid(); // the group permissions of the binary.
-  
-  int is_tt_user = (user_detail->pw_uid == getuid());
-  
-  // for tt_user, set 770 permissions; for any other user, set 570
-  mode_t permissions = is_tt_user ? (S_IRWXU | S_IRWXG)
-                                  : (S_IRUSR | S_IXUSR | S_IRWXG);
-  int failed = 0;
-  struct stat filestat;
-  if (stat(localized_unique_dir, &filestat) != 0) {
-    // stat on distcache failed because of something
-    fprintf(LOGFILE, "Failed to stat the localized_unique_dir %s\n",
-        localized_unique_dir);
-    failed = INITIALIZE_DISTCACHEFILE_FAILED;
-  } else if (secure_path(localized_unique_dir, user_detail->pw_uid,
-        binary_gid, permissions, S_ISGID | permissions, 1) != 0) {
-    // No setgid on files and setgid on dirs,
-    // 770 for tt_user and 570 for any other user
-    fprintf(LOGFILE, "Failed to secure the localized_unique_dir %s\n",
-        localized_unique_dir);
-    failed = INITIALIZE_DISTCACHEFILE_FAILED;
-  }
-  free(localized_unique_dir);
-  cleanup();
-  return failed;
-}
-
-/**
- * Function used to initialize task. Prepares attempt_dir, jars_dir and
- * log_dir to be accessible by the child
- */
-int initialize_task(const char *jobid, const char *taskid, const char *user) {
-  int exit_code = 0;
-#ifdef DEBUG
-  fprintf(LOGFILE, "job-id passed to initialize_task : %s.\n", jobid);
-  fprintf(LOGFILE, "task-d passed to initialize_task : %s.\n", taskid);
-#endif
-
-  if (prepare_attempt_directories(jobid, taskid, user) != 0) {
-    fprintf(LOGFILE,
-        "Couldn't prepare the attempt directories for %s of user %s.\n",
-        taskid, user);
-    exit_code = PREPARE_ATTEMPT_DIRECTORIES_FAILED;
-    goto cleanup;
-  }
-
-  char *log_dir = (char *) get_value(TT_LOG_DIR_KEY);
-  if (log_dir == NULL) {
-    fprintf(LOGFILE, "Log directory is not configured.\n");
-    exit_code = INVALID_TT_LOG_DIR;
-    goto cleanup;
-  }
-
-  if (prepare_task_logs(log_dir, jobid, taskid) != 0) {
-    fprintf(LOGFILE, "Couldn't prepare task logs directory %s for %s.\n",
-        log_dir, taskid);
-    exit_code = PREPARE_TASK_LOGS_FAILED;
-  }
-
-  cleanup:
-  // free configurations
-  cleanup();
-  if (log_dir != NULL) {
-    free(log_dir);
-  }
-  return exit_code;
-}
-
-/*
- * Function used to launch a task as the provided user. It does the following :
- * 1) Checks if the tt_root passed is found in mapred.local.dir
- * 2) Prepares attempt_dir and log_dir to be accessible by the child
- * 3) Uses get_task_launcher_file to fetch the task script file path
- * 4) Does an execlp on the same in order to replace the current image with
- * task image.
- */
-int run_task_as_user(const char * user, const char *jobid, const char *taskid,
-    const char *tt_root) {
-  int exit_code = 0;
-
-  if (jobid == NULL || taskid == NULL || tt_root == NULL) {
-    return INVALID_ARGUMENT_NUMBER;
-  }
-
-#ifdef DEBUG
-  fprintf(LOGFILE, "Job-id passed to run_task_as_user : %s.\n", jobid);
-  fprintf(LOGFILE, "task-d passed to run_task_as_user : %s.\n", taskid);
-  fprintf(LOGFILE, "tt_root passed to run_task_as_user : %s.\n", tt_root);
-#endif
-
-  //Check tt_root before switching the user, as reading configuration
-  //file requires privileged access.
-  if (check_tt_root(tt_root) < 0) {
-    fprintf(LOGFILE, "invalid tt root passed %s\n", tt_root);
-    cleanup();
-    return INVALID_TT_ROOT;
-  }
-
-  char *job_dir = NULL, *task_script_path = NULL;
-
-  if ((exit_code = initialize_task(jobid, taskid, user)) != 0) {
-    fprintf(LOGFILE, "Couldn't initialise the task %s of user %s.\n", taskid,
-        user);
-    goto cleanup;
-  }
-
-  job_dir = get_job_directory(tt_root, user, jobid);
-  if (job_dir == NULL) {
-    fprintf(LOGFILE, "Couldn't obtain job_dir for %s in %s.\n", jobid, tt_root);
-    exit_code = OUT_OF_MEMORY;
-    goto cleanup;
-  }
-
-  task_script_path = get_task_launcher_file(job_dir, taskid);
-  if (task_script_path == NULL) {
-    fprintf(LOGFILE, "Couldn't obtain task_script_path in %s.\n", job_dir);
-    exit_code = OUT_OF_MEMORY;
-    goto cleanup;
-  }
-
-  errno = 0;
-  exit_code = check_path_for_relative_components(task_script_path);
-  if(exit_code != 0) {
-    goto cleanup;
-  }
-
-  //change the user
-  fcloseall();
-  free(job_dir);
-  umask(0007);
-  if (change_user(user) != 0) {
-    exit_code = SETUID_OPER_FAILED;
-    goto cleanup;
-  }
-
-  errno = 0;
-  cleanup();
-  execlp(task_script_path, task_script_path, NULL);
-  if (errno != 0) {
-    fprintf(LOGFILE, "Couldn't execute the task jvm file: %s", strerror(errno));
-    free(task_script_path);
-    exit_code = UNABLE_TO_EXECUTE_TASK_SCRIPT;
-  }
-
-  return exit_code;
-
-cleanup:
-  if (job_dir != NULL) {
-    free(job_dir);
-  }
-  if (task_script_path != NULL) {
-    free(task_script_path);
-  }
-  // free configurations
-  cleanup();
-  return exit_code;
-}
-
-/**
- * Function used to terminate/kill a task launched by the user.
- * The function sends appropriate signal to the process group
- * specified by the task_pid.
- */
-int kill_user_task(const char *user, const char *task_pid, int sig) {
-  int pid = 0;
-
-  if(task_pid == NULL) {
-    return INVALID_ARGUMENT_NUMBER;
-  }
-
-#ifdef DEBUG
-  fprintf(LOGFILE, "user passed to kill_user_task : %s.\n", user);
-  fprintf(LOGFILE, "task-pid passed to kill_user_task : %s.\n", task_pid);
-  fprintf(LOGFILE, "signal passed to kill_user_task : %d.\n", sig);
-#endif
-
-  pid = atoi(task_pid);
-
-  if(pid <= 0) {
-    return INVALID_TASK_PID;
-  }
-
-  fcloseall();
-  if (change_user(user) != 0) {
-    cleanup();
-    return SETUID_OPER_FAILED;
-  }
-
-  //Don't continue if the process-group is not alive anymore.
-  if(kill(-pid,0) < 0) {
-    errno = 0;
-    cleanup();
-    return 0;
-  }
-
-  if (kill(-pid, sig) < 0) {
-    if(errno != ESRCH) {
-      fprintf(LOGFILE, "Error is %s\n", strerror(errno));
-      cleanup();
-      return UNABLE_TO_KILL_TASK;
-    }
-    errno = 0;
-  }
-  cleanup();
-  return 0;
-}
-
-/**
- * Enables the path for deletion by changing the owner, group and permissions
- * of the specified path and all the files/directories in the path recursively.
- *     *  sudo chown user:mapred -R full_path
- *     *  sudo chmod 2770 -R full_path
- * Before changing permissions, makes sure that the given path doesn't contain
- * any relative components.
- * tt_root : is the base path(i.e. mapred-local-dir) sent to task-controller
- * full_path : is either jobLocalDir, taskDir OR taskWorkDir that is to be 
- *             deleted
- */
-static int enable_path_for_cleanup(const char *tt_root, const char *user,
-                                   char *full_path) {
-  int exit_code = 0;
-  gid_t tasktracker_gid = getegid(); // the group permissions of the binary.
-
-  if (check_tt_root(tt_root) < 0) {
-    fprintf(LOGFILE, "invalid tt root passed %s\n", tt_root);
-    cleanup();
-    return INVALID_TT_ROOT;
-  }
- 
-  if (full_path == NULL) {
-    fprintf(LOGFILE,
-            "Could not build the full path. Not deleting the dir %s\n",
-            full_path);
-    exit_code = UNABLE_TO_BUILD_PATH; // may be malloc failed
-  }
-     // Make sure that the path given is not having any relative components
-  else if ((exit_code = check_path_for_relative_components(full_path)) != 0) {
-    fprintf(LOGFILE,
-    "Not changing permissions. Path may contain relative components.\n",
-         full_path);
-  }
-  else if (get_user_details(user) < 0) {
-    fprintf(LOGFILE, "Couldn't get the user details of %s.\n", user);
-    exit_code = INVALID_USER_NAME;
-  }
-  else if (exit_code = secure_path(full_path, user_detail->pw_uid,
-               tasktracker_gid,
-               S_IRWXU | S_IRWXG, S_ISGID | S_IRWXU | S_IRWXG, 0) != 0) {
-    // No setgid on files and setgid on dirs, 770.
-    // set 770 permissions for user, TTgroup for all files/directories in
-    // 'full_path' recursively sothat deletion of path by TaskTracker succeeds.
-
-    fprintf(LOGFILE, "Failed to set permissions for %s\n", full_path);
-  }
-
-  if (full_path != NULL) {
-    free(full_path);
-  }
-  // free configurations
-  cleanup();
-  return exit_code;
-}
-
-/**
- * Enables the task work-dir/local-dir path for deletion.
- * tt_root : is the base path(i.e. mapred-local-dir) sent to task-controller
- * dir_to_be_deleted : is either taskDir OR taskWorkDir that is to be deleted
- */
-int enable_task_for_cleanup(const char *tt_root, const char *user,
-           const char *jobid, const char *dir_to_be_deleted) {
-  char *full_path = get_task_dir_path(tt_root, user, jobid, dir_to_be_deleted);
-  return enable_path_for_cleanup(tt_root, user, full_path);
-}
-
-/**
- * Enables the jobLocalDir for deletion.
- * tt_root : is the base path(i.e. mapred-local-dir) sent to task-controller
- * user    : owner of the job
- * jobid   : id of the job for which the cleanup is needed.
- */
-int enable_job_for_cleanup(const char *tt_root, const char *user, 
-                           const char *jobid) {
-  char *full_path = get_job_directory(tt_root, user, jobid);
-  return enable_path_for_cleanup(tt_root, user, full_path);
-}

+ 0 - 141
src/c++/task-controller/task-controller.h

@@ -1,141 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <pwd.h>
-#include <assert.h>
-#include <getopt.h>
-#include <sys/stat.h>
-#include <sys/signal.h>
-#include <getopt.h>
-#include <grp.h>
-#include <dirent.h>
-#include <fcntl.h>
-#include <fts.h>
-
-#include "configuration.h"
-
-//command definitions
-enum command {
-  INITIALIZE_USER,
-  INITIALIZE_JOB,
-  INITIALIZE_DISTRIBUTEDCACHE_FILE,
-  LAUNCH_TASK_JVM,
-  INITIALIZE_TASK,
-  TERMINATE_TASK_JVM,
-  KILL_TASK_JVM,
-  ENABLE_TASK_FOR_CLEANUP,
-  ENABLE_JOB_FOR_CLEANUP
-};
-
-enum errorcodes {
-  INVALID_ARGUMENT_NUMBER = 1,
-  INVALID_USER_NAME, //2
-  INVALID_COMMAND_PROVIDED, //3
-  SUPER_USER_NOT_ALLOWED_TO_RUN_TASKS, //4
-  INVALID_TT_ROOT, //5
-  SETUID_OPER_FAILED, //6
-  UNABLE_TO_EXECUTE_TASK_SCRIPT, //7
-  UNABLE_TO_KILL_TASK, //8
-  INVALID_TASK_PID, //9
-  ERROR_RESOLVING_FILE_PATH, //10
-  RELATIVE_PATH_COMPONENTS_IN_FILE_PATH, //11
-  UNABLE_TO_STAT_FILE, //12
-  FILE_NOT_OWNED_BY_TASKTRACKER, //13
-  PREPARE_ATTEMPT_DIRECTORIES_FAILED, //14
-  INITIALIZE_JOB_FAILED, //15
-  PREPARE_TASK_LOGS_FAILED, //16
-  INVALID_TT_LOG_DIR, //17
-  OUT_OF_MEMORY, //18
-  INITIALIZE_DISTCACHEFILE_FAILED, //19
-  INITIALIZE_USER_FAILED, //20
-  UNABLE_TO_BUILD_PATH, //21
-  INVALID_TASKCONTROLLER_PERMISSIONS, //22
-  PREPARE_JOB_LOGS_FAILED, //23
-};
-
-#define USER_DIR_PATTERN "%s/taskTracker/%s"
-
-#define TT_JOB_DIR_PATTERN USER_DIR_PATTERN"/jobcache/%s"
-
-#define USER_DISTRIBUTED_CACHE_DIR_PATTERN USER_DIR_PATTERN"/distcache/%s"
-
-#define JOB_DIR_TO_JOB_WORK_PATTERN "%s/work"
-
-#define JOB_DIR_TO_ATTEMPT_DIR_PATTERN "%s/%s"
-
-#define JOB_LOG_DIR_PATTERN "%s/userlogs/%s"
-
-#define JOB_LOG_DIR_TO_JOB_ACLS_FILE_PATTERN "%s/job-acls.xml"
-
-#define ATTEMPT_LOG_DIR_PATTERN JOB_LOG_DIR_PATTERN"/%s"
-
-#define TASK_SCRIPT_PATTERN "%s/%s/taskjvm.sh"
-
-#define TT_LOCAL_TASK_DIR_PATTERN    "%s/taskTracker/%s/jobcache/%s/%s"
-
-#define TT_SYS_DIR_KEY "mapred.local.dir"
-
-#define TT_LOG_DIR_KEY "hadoop.log.dir"
-
-#define TT_GROUP_KEY "mapreduce.tasktracker.group"
-
-#ifndef HADOOP_CONF_DIR
-  #define EXEC_PATTERN "/bin/task-controller"
-  extern char * hadoop_conf_dir;
-#endif
-
-extern struct passwd *user_detail;
-
-extern FILE *LOGFILE;
-
-int run_task_as_user(const char * user, const char *jobid, const char *taskid,
-    const char *tt_root);
-
-int initialize_user(const char *user);
-
-int initialize_task(const char *jobid, const char *taskid, const char *user);
-
-int initialize_job(const char *jobid, const char *user);
-
-int initialize_distributed_cache_file(const char *tt_root, 
-    const char* unique_string, const char *user);
-
-int kill_user_task(const char *user, const char *task_pid, int sig);
-
-int enable_task_for_cleanup(const char *tt_root, const char *user,
-                            const char *jobid, const char *dir_to_be_deleted);
-
-int enable_job_for_cleanup(const char *tt_root, const char *user,
-                           const char *jobid);
-
-int prepare_attempt_directory(const char *attempt_dir, const char *user);
-
-// The following functions are exposed for testing
-
-int check_variable_against_config(const char *config_key,
-    const char *passed_value);
-
-int get_user_details(const char *user);
-
-char *get_task_launcher_file(const char *job_dir, const char *attempt_dir);

+ 763 - 0
src/c++/task-controller/test/test-task-controller.c

@@ -0,0 +1,763 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "configuration.h"
+#include "task-controller.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+#define TEST_ROOT "/tmp/test-task-controller"
+#define DONT_TOUCH_FILE "dont-touch-me"
+
+static char* username = NULL;
+
+/**
+ * Run the command using the effective user id.
+ * It can't use system, since bash seems to copy the real user id into the
+ * effective id.
+ */
+void run(const char *cmd) {
+  fflush(stdout);
+  fflush(stderr);
+  pid_t child = fork();
+  if (child == -1) {
+    printf("FAIL: failed to fork - %s\n", strerror(errno));
+  } else if (child == 0) {
+    char *cmd_copy = strdup(cmd);
+    char *ptr;
+    int words = 1;
+    for(ptr = strchr(cmd_copy, ' ');  ptr; ptr = strchr(ptr+1, ' ')) {
+      words += 1;
+    }
+    char **argv = malloc(sizeof(char *) * (words + 1));
+    ptr = strtok(cmd_copy, " ");
+    int i = 0;
+    argv[i++] = ptr;
+    while (ptr != NULL) {
+      ptr = strtok(NULL, " ");
+      argv[i++] = ptr;
+    }
+    if (execvp(argv[0], argv) != 0) {
+      printf("FAIL: exec failed in child %s - %s\n", cmd, strerror(errno));
+      exit(42);
+    }
+  } else {
+    int status = 0;
+    if (waitpid(child, &status, 0) <= 0) {
+      printf("FAIL: failed waiting for child process %s pid %d - %s\n", 
+	     cmd, child, strerror(errno));
+      exit(1);
+    }
+    if (!WIFEXITED(status)) {
+      printf("FAIL: process %s pid %d did not exit\n", cmd, child);
+      exit(1);
+    }
+    if (WEXITSTATUS(status) != 0) {
+      printf("FAIL: process %s pid %d exited with error status %d\n", cmd, 
+	     child, WEXITSTATUS(status));
+      exit(1);
+    }
+  }
+}
+
+int write_config_file(char *file_name) {
+  FILE *file;
+  file = fopen(file_name, "w");
+  if (file == NULL) {
+    printf("Failed to open %s.\n", file_name);
+    return EXIT_FAILURE;
+  }
+  fprintf(file, "mapred.local.dir=" TEST_ROOT "/local-1");
+  int i;
+  for(i=2; i < 5; ++i) {
+    fprintf(file, "," TEST_ROOT "/local-%d", i);
+  }
+  fprintf(file, "\n");
+  fprintf(file, "hadoop.log.dir=" TEST_ROOT "/logs\n");
+  fclose(file);
+  return 0;
+}
+
+void create_tt_roots() {
+  char** tt_roots = get_values("mapred.local.dir");
+  char** tt_root;
+  for(tt_root=tt_roots; *tt_root != NULL; ++tt_root) {
+    if (mkdir(*tt_root, 0755) != 0) {
+      printf("FAIL: Can't create directory %s - %s\n", *tt_root,
+	     strerror(errno));
+      exit(1);
+    }
+    char buffer[100000];
+    sprintf(buffer, "%s/taskTracker", *tt_root);
+    if (mkdir(buffer, 0755) != 0) {
+      printf("FAIL: Can't create directory %s - %s\n", buffer,
+	     strerror(errno));
+      exit(1);
+    }
+  }
+  free_values(tt_roots);
+}
+
+void test_get_user_directory() {
+  char *user_dir = get_user_directory("/tmp", "user");
+  char *expected = "/tmp/taskTracker/user";
+  if (strcmp(user_dir, expected) != 0) {
+    printf("test_get_user_directory expected %s got %s\n", user_dir, expected);
+    exit(1);
+  }
+  free(user_dir);
+}
+
+void test_get_job_directory() {
+  char *expected = "/tmp/taskTracker/user/jobcache/job_200906101234_0001";
+  char *job_dir = (char *) get_job_directory("/tmp", "user",
+      "job_200906101234_0001");
+  if (strcmp(job_dir, expected) != 0) {
+    exit(1);
+  }
+  free(job_dir);
+}
+
+void test_get_attempt_directory() {
+  char *attempt_dir = get_attempt_work_directory("/tmp", "owen", "job_1",
+						 "attempt_1");
+  char *expected = "/tmp/taskTracker/owen/jobcache/job_1/attempt_1/work";
+  if (strcmp(attempt_dir, expected) != 0) {
+    printf("Fail get_attempt_work_directory got %s expected %s\n",
+	   attempt_dir, expected);
+  }
+  free(attempt_dir);
+}
+
+void test_get_task_launcher_file() {
+  char *expected_file = ("/tmp/taskTracker/user/jobcache/job_200906101234_0001"
+			 "/taskjvm.sh");
+  char *job_dir = get_job_directory("/tmp", "user",
+                                    "job_200906101234_0001");
+  char *task_file =  get_task_launcher_file(job_dir);
+  if (strcmp(task_file, expected_file) != 0) {
+    printf("failure to match expected task file %s vs %s\n", task_file,
+           expected_file);
+    exit(1);
+  }
+  free(job_dir);
+  free(task_file);
+}
+
+void test_get_job_log_dir() {
+  char *expected = TEST_ROOT "/logs/userlogs/job_200906101234_0001";
+  char *logdir = get_job_log_directory("job_200906101234_0001");
+  if (strcmp(logdir, expected) != 0) {
+    printf("Fail get_job_log_dir got %s expected %s\n", logdir, expected);
+    exit(1);
+  }
+  free(logdir);
+}
+
+void test_get_task_log_dir() {
+  char *logdir = get_job_log_directory("job_5/task_4");
+  char *expected = TEST_ROOT "/logs/userlogs/job_5/task_4";
+  if (strcmp(logdir, expected) != 0) {
+    printf("FAIL: get_task_log_dir expected %s got %s\n", logdir, expected);
+  }
+  free(logdir);
+}
+
+void test_check_user() {
+  printf("\nTesting test_check_user\n");
+  struct passwd *user = check_user(username);
+  if (user == NULL) {
+    printf("FAIL: failed check for user %s\n", username);
+    exit(1);
+  }
+  free(user);
+  if (check_user("lp") != NULL) {
+    printf("FAIL: failed check for system user lp\n");
+    exit(1);
+  }
+  if (check_user("root") != NULL) {
+    printf("FAIL: failed check for system user root\n");
+    exit(1);
+  }
+  if (check_user("mapred") != NULL) {
+    printf("FAIL: failed check for hadoop user mapred\n");
+    exit(1);
+  }
+}
+
+void test_check_configuration_permissions() {
+  printf("\nTesting check_configuration_permissions\n");
+  if (check_configuration_permissions("/etc/passwd") != 0) {
+    printf("FAIL: failed permission check on /etc/passwd\n");
+    exit(1);
+  }
+  if (check_configuration_permissions(TEST_ROOT) == 0) {
+    printf("FAIL: failed permission check on %s\n", TEST_ROOT);
+    exit(1);
+  }
+}
+
+void test_delete_task() {
+  if (initialize_user(username)) {
+    printf("FAIL: failed to initialized user %s\n", username);
+    exit(1);
+  }
+  char* job_dir = get_job_directory(TEST_ROOT "/local-2", username, "job_1");
+  char* dont_touch = get_job_directory(TEST_ROOT "/local-2", username, 
+                                       DONT_TOUCH_FILE);
+  char* task_dir = get_attempt_work_directory(TEST_ROOT "/local-2", 
+					      username, "job_1", "task_1");
+  char buffer[100000];
+  sprintf(buffer, "mkdir -p %s/who/let/the/dogs/out/who/who", task_dir);
+  run(buffer);
+  sprintf(buffer, "touch %s", dont_touch);
+  run(buffer);
+
+  // soft link to the canary file from the task directory
+  sprintf(buffer, "ln -s %s %s/who/softlink", dont_touch, task_dir);
+  run(buffer);
+  // hard link to the canary file from the task directory
+  sprintf(buffer, "ln %s %s/who/hardlink", dont_touch, task_dir);
+  run(buffer);
+  // create a dot file in the task directory
+  sprintf(buffer, "touch %s/who/let/.dotfile", task_dir);
+  run(buffer);
+  // create a no permission file
+  sprintf(buffer, "touch %s/who/let/protect", task_dir);
+  run(buffer);
+  sprintf(buffer, "chmod 000 %s/who/let/protect", task_dir);
+  run(buffer);
+  // create a no permission directory
+  sprintf(buffer, "chmod 000 %s/who/let", task_dir);
+  run(buffer);
+
+  // delete task directory
+  int ret = delete_as_user(username, "jobcache/job_1/task_1");
+  if (ret != 0) {
+    printf("FAIL: return code from delete_as_user is %d\n", ret);
+    exit(1);
+  }
+
+  // check to make sure the task directory is gone
+  if (access(task_dir, R_OK) == 0) {
+    printf("FAIL: failed to delete the directory - %s\n", task_dir);
+    exit(1);
+  }
+  // check to make sure the job directory is not gone
+  if (access(job_dir, R_OK) != 0) {
+    printf("FAIL: accidently deleted the directory - %s\n", job_dir);
+    exit(1);
+  }
+  // but that the canary is not gone
+  if (access(dont_touch, R_OK) != 0) {
+    printf("FAIL: accidently deleted file %s\n", dont_touch);
+    exit(1);
+  }
+  sprintf(buffer, "chmod -R 700 %s", job_dir);
+  run(buffer);
+  sprintf(buffer, "rm -fr %s", job_dir);
+  run(buffer);
+  free(job_dir);
+  free(task_dir);
+  free(dont_touch);
+}
+
+void test_delete_job() {
+  char* job_dir = get_job_directory(TEST_ROOT "/local-2", username, "job_2");
+  char* dont_touch = get_job_directory(TEST_ROOT "/local-2", username, 
+                                       DONT_TOUCH_FILE);
+  char* task_dir = get_attempt_work_directory(TEST_ROOT "/local-2", 
+					      username, "job_2", "task_1");
+  char buffer[100000];
+  sprintf(buffer, "mkdir -p %s/who/let/the/dogs/out/who/who", task_dir);
+  run(buffer);
+  sprintf(buffer, "touch %s", dont_touch);
+  run(buffer);
+
+  // soft link to the canary file from the task directory
+  sprintf(buffer, "ln -s %s %s/who/softlink", dont_touch, task_dir);
+  run(buffer);
+  // hard link to the canary file from the task directory
+  sprintf(buffer, "ln %s %s/who/hardlink", dont_touch, task_dir);
+  run(buffer);
+  // create a dot file in the task directory
+  sprintf(buffer, "touch %s/who/let/.dotfile", task_dir);
+  run(buffer);
+  // create a no permission file
+  sprintf(buffer, "touch %s/who/let/protect", task_dir);
+  run(buffer);
+  sprintf(buffer, "chmod 000 %s/who/let/protect", task_dir);
+  run(buffer);
+  // create a no permission directory
+  sprintf(buffer, "chmod 000 %s/who/let", task_dir);
+  run(buffer);
+
+  // delete task directory
+  int ret = delete_as_user(username, "jobcache/job_2");
+  if (ret != 0) {
+    printf("FAIL: return code from delete_as_user is %d\n", ret);
+    exit(1);
+  }
+
+  // check to make sure the task directory is gone
+  if (access(task_dir, R_OK) == 0) {
+    printf("FAIL: failed to delete the directory - %s\n", task_dir);
+    exit(1);
+  }
+  // check to make sure the job directory is gone
+  if (access(job_dir, R_OK) == 0) {
+    printf("FAIL: didn't delete the directory - %s\n", job_dir);
+    exit(1);
+  }
+  // but that the canary is not gone
+  if (access(dont_touch, R_OK) != 0) {
+    printf("FAIL: accidently deleted file %s\n", dont_touch);
+    exit(1);
+  }
+  free(job_dir);
+  free(task_dir);
+  free(dont_touch);
+}
+
+
+void test_delete_user() {
+  printf("\nTesting delete_user\n");
+  char* job_dir = get_job_directory(TEST_ROOT "/local-1", username, "job_3");
+  if (mkdirs(job_dir, 0700) != 0) {
+    exit(1);
+  }
+  char buffer[100000];
+  sprintf(buffer, "%s/local-1/taskTracker/%s", TEST_ROOT, username);
+  if (access(buffer, R_OK) != 0) {
+    printf("FAIL: directory missing before test\n");
+    exit(1);
+  }
+  if (delete_as_user(username, "") != 0) {
+    exit(1);
+  }
+  if (access(buffer, R_OK) == 0) {
+    printf("FAIL: directory not deleted\n");
+    exit(1);
+  }
+  if (access(TEST_ROOT "/local-1", R_OK) != 0) {
+    printf("FAIL: local-1 directory does not exist\n");
+    exit(1);
+  }
+  free(job_dir);
+}
+
+void test_delete_log_directory() {
+  printf("\nTesting delete_log_directory\n");
+  char *job_log_dir = get_job_log_directory("job_1");
+  if (job_log_dir == NULL) {
+    exit(1);
+  }
+  if (create_directory_for_user(job_log_dir) != 0) {
+    exit(1);
+  }
+  free(job_log_dir);
+  char *task_log_dir = get_job_log_directory("job_1/task_2");
+  if (task_log_dir == NULL) {
+    exit(1);
+  }
+  if (mkdirs(task_log_dir, 0700) != 0) {
+    exit(1);
+  }
+  if (access(TEST_ROOT "/logs/userlogs/job_1/task_2", R_OK) != 0) {
+    printf("FAIL: can't access task directory - %s\n", strerror(errno));
+    exit(1);
+  }
+  if (delete_log_directory("job_1/task_2") != 0) {
+    printf("FAIL: can't delete task directory\n");
+    exit(1);
+  }
+  if (access(TEST_ROOT "/logs/userlogs/job_1/task_2", R_OK) == 0) {
+    printf("FAIL: task directory not deleted\n");
+    exit(1);
+  }
+  if (access(TEST_ROOT "/logs/userlogs/job_1", R_OK) != 0) {
+    printf("FAIL: job directory not deleted - %s\n", strerror(errno));
+    exit(1);
+  }
+  if (delete_log_directory("job_1") != 0) {
+    printf("FAIL: can't delete task directory\n");
+    exit(1);
+  }
+  if (access(TEST_ROOT "/logs/userlogs/job_1", R_OK) == 0) {
+    printf("FAIL: job directory not deleted\n");
+    exit(1);
+  }
+  free(task_log_dir);
+}
+
+void run_test_in_child(const char* test_name, void (*func)()) {
+  printf("\nRunning test %s in child process\n", test_name);
+  fflush(stdout);
+  fflush(stderr);
+  pid_t child = fork();
+  if (child == -1) {
+    printf("FAIL: fork failed\n");
+    exit(1);
+  } else if (child == 0) {
+    func();
+    exit(0);
+  } else {
+    int status = 0;
+    if (waitpid(child, &status, 0) == -1) {
+      printf("FAIL: waitpid %d failed - %s\n", child, strerror(errno));
+      exit(1);
+    }
+    if (!WIFEXITED(status)) {
+      printf("FAIL: child %d didn't exit - %d\n", child, status);
+      exit(1);
+    }
+    if (WEXITSTATUS(status) != 0) {
+      printf("FAIL: child %d exited with bad status %d\n",
+	     child, WEXITSTATUS(status));
+      exit(1);
+    }
+  }
+}
+
+void test_signal_task() {
+  printf("\nTesting signal_task\n");
+  fflush(stdout);
+  fflush(stderr);
+  pid_t child = fork();
+  if (child == -1) {
+    printf("FAIL: fork failed\n");
+    exit(1);
+  } else if (child == 0) {
+    if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+      exit(1);
+    }
+    sleep(3600);
+    exit(0);
+  } else {
+    printf("Child task launched as %d\n", child);
+    if (signal_user_task(username, child, SIGQUIT) != 0) {
+      exit(1);
+    }
+    int status = 0;
+    if (waitpid(child, &status, 0) == -1) {
+      printf("FAIL: waitpid failed - %s\n", strerror(errno));
+      exit(1);
+    }
+    if (!WIFSIGNALED(status)) {
+      printf("FAIL: child wasn't signalled - %d\n", status);
+      exit(1);
+    }
+    if (WTERMSIG(status) != SIGQUIT) {
+      printf("FAIL: child was killed with %d instead of %d\n", 
+	     WTERMSIG(status), SIGQUIT);
+      exit(1);
+    }
+  }
+}
+
+void test_signal_task_group() {
+  printf("\nTesting group signal_task\n");
+  fflush(stdout);
+  fflush(stderr);
+  pid_t child = fork();
+  if (child == -1) {
+    printf("FAIL: fork failed\n");
+    exit(1);
+  } else if (child == 0) {
+    setpgrp();
+    if (change_user(user_detail->pw_uid, user_detail->pw_gid) != 0) {
+      exit(1);
+    }
+    sleep(3600);
+    exit(0);
+  }
+  printf("Child task launched as %d\n", child);
+  if (signal_user_task(username, child, SIGKILL) != 0) {
+    exit(1);
+  }
+  int status = 0;
+  if (waitpid(child, &status, 0) == -1) {
+    printf("FAIL: waitpid failed - %s\n", strerror(errno));
+    exit(1);
+  }
+  if (!WIFSIGNALED(status)) {
+    printf("FAIL: child wasn't signalled - %d\n", status);
+    exit(1);
+  }
+  if (WTERMSIG(status) != SIGKILL) {
+    printf("FAIL: child was killed with %d instead of %d\n", 
+	   WTERMSIG(status), SIGKILL);
+    exit(1);
+  }
+}
+
+void test_init_job() {
+  printf("\nTesting init job\n");
+  if (seteuid(0) != 0) {
+    printf("FAIL: seteuid to root failed - %s\n", strerror(errno));
+    exit(1);
+  }
+  FILE* creds = fopen(TEST_ROOT "/creds.txt", "w");
+  if (creds == NULL) {
+    printf("FAIL: failed to create credentials file - %s\n", strerror(errno));
+    exit(1);
+  }
+  if (fprintf(creds, "secret key\n") < 0) {
+    printf("FAIL: fprintf failed - %s\n", strerror(errno));
+    exit(1);
+  }
+  if (fclose(creds) != 0) {
+    printf("FAIL: fclose failed - %s\n", strerror(errno));
+    exit(1);
+  }
+  FILE* job_xml = fopen(TEST_ROOT "/job.xml", "w");
+  if (job_xml == NULL) {
+    printf("FAIL: failed to create job file - %s\n", strerror(errno));
+    exit(1);
+  }
+  if (fprintf(job_xml, "<jobconf/>\n") < 0) {
+    printf("FAIL: fprintf failed - %s\n", strerror(errno));
+    exit(1);
+  }
+  if (fclose(job_xml) != 0) {
+    printf("FAIL: fclose failed - %s\n", strerror(errno));
+    exit(1);
+  }
+  if (seteuid(user_detail->pw_uid) != 0) {
+    printf("FAIL: failed to seteuid back to user - %s\n", strerror(errno));
+    exit(1);
+  }
+  fflush(stdout);
+  fflush(stderr);
+  pid_t child = fork();
+  if (child == -1) {
+    printf("FAIL: failed to fork process for init_job - %s\n", 
+	   strerror(errno));
+    exit(1);
+  } else if (child == 0) {
+    char *final_pgm[] = {"touch", "my-touch-file", 0};
+    if (initialize_job(username, "job_4", TEST_ROOT "/creds.txt", 
+                       TEST_ROOT "/job.xml", final_pgm) != 0) {
+      printf("FAIL: failed in child\n");
+      exit(42);
+    }
+    // should never return
+    exit(1);
+  }
+  int status = 0;
+  if (waitpid(child, &status, 0) <= 0) {
+    printf("FAIL: failed waiting for process %d - %s\n", child, 
+	   strerror(errno));
+    exit(1);
+  }
+  if (access(TEST_ROOT "/logs/userlogs/job_4", R_OK) != 0) {
+    printf("FAIL: failed to create job log directory\n");
+    exit(1);
+  }
+  char* job_dir = get_job_directory(TEST_ROOT "/local-1", username, "job_4");
+  if (access(job_dir, R_OK) != 0) {
+    printf("FAIL: failed to create job directory %s\n", job_dir);
+    exit(1);
+  }
+  char buffer[100000];
+  sprintf(buffer, "%s/jobToken", job_dir);
+  if (access(buffer, R_OK) != 0) {
+    printf("FAIL: failed to create credentials %s\n", buffer);
+    exit(1);
+  }
+  sprintf(buffer, "%s/my-touch-file", job_dir);
+  if (access(buffer, R_OK) != 0) {
+    printf("FAIL: failed to create touch file %s\n", buffer);
+    exit(1);
+  }
+  free(job_dir);
+  job_dir = get_job_log_directory("job_4");
+  if (access(job_dir, R_OK) != 0) {
+    printf("FAIL: failed to create job log directory %s\n", job_dir);
+    exit(1);
+  }
+  free(job_dir);
+}
+
+void test_run_task() {
+  printf("\nTesting run task\n");
+  if (seteuid(0) != 0) {
+    printf("FAIL: seteuid to root failed - %s\n", strerror(errno));
+    exit(1);
+  }
+  const char* script_name = TEST_ROOT "/task-script";
+  FILE* script = fopen(script_name, "w");
+  if (script == NULL) {
+    printf("FAIL: failed to create script file - %s\n", strerror(errno));
+    exit(1);
+  }
+  if (seteuid(user_detail->pw_uid) != 0) {
+    printf("FAIL: failed to seteuid back to user - %s\n", strerror(errno));
+    exit(1);
+  }
+  if (fprintf(script, "#!/bin/bash\n"
+                     "touch foobar\n"
+                     "exit 0") < 0) {
+    printf("FAIL: fprintf failed - %s\n", strerror(errno));
+    exit(1);
+  }
+  if (fclose(script) != 0) {
+    printf("FAIL: fclose failed - %s\n", strerror(errno));
+    exit(1);
+  }
+  fflush(stdout);
+  fflush(stderr);
+  char* task_dir = get_attempt_work_directory(TEST_ROOT "/local-1", 
+					      username, "job_4", "task_1");
+  pid_t child = fork();
+  if (child == -1) {
+    printf("FAIL: failed to fork process for init_job - %s\n", 
+	   strerror(errno));
+    exit(1);
+  } else if (child == 0) {
+    if (run_task_as_user(username, "job_4", "task_1", 
+                         task_dir, script_name) != 0) {
+      printf("FAIL: failed in child\n");
+      exit(42);
+    }
+    // should never return
+    exit(1);
+  }
+  int status = 0;
+  if (waitpid(child, &status, 0) <= 0) {
+    printf("FAIL: failed waiting for process %d - %s\n", child, 
+	   strerror(errno));
+    exit(1);
+  }
+  if (access(TEST_ROOT "/logs/userlogs/job_4/task_1", R_OK) != 0) {
+    printf("FAIL: failed to create task log directory\n");
+    exit(1);
+  }
+  if (access(task_dir, R_OK) != 0) {
+    printf("FAIL: failed to create task directory %s\n", task_dir);
+    exit(1);
+  }
+  char buffer[100000];
+  sprintf(buffer, "%s/foobar", task_dir);
+  if (access(buffer, R_OK) != 0) {
+    printf("FAIL: failed to create touch file %s\n", buffer);
+    exit(1);
+  }
+  free(task_dir);
+  task_dir = get_job_log_directory("job_4/task_1");
+  if (access(task_dir, R_OK) != 0) {
+    printf("FAIL: failed to create job log directory %s\n", task_dir);
+    exit(1);
+  }
+  free(task_dir);
+}
+
+int main(int argc, char **argv) {
+  LOGFILE = stdout;
+  int my_username = 0;
+
+  // clean up any junk from previous run
+  system("chmod -R u=rwx " TEST_ROOT "; rm -fr " TEST_ROOT);
+  
+  if (mkdirs(TEST_ROOT "/logs/userlogs", 0755) != 0) {
+    exit(1);
+  }
+  
+  if (write_config_file(TEST_ROOT "/test.cfg") != 0) {
+    exit(1);
+  }
+  read_config(TEST_ROOT "/test.cfg");
+
+  create_tt_roots();
+
+  if (getuid() == 0 && argc == 2) {
+    username = argv[1];
+  } else {
+    username = strdup(getpwuid(getuid())->pw_name);
+    my_username = 1;
+  }
+  set_tasktracker_uid(geteuid(), getegid());
+
+  if (set_user(username)) {
+    exit(1);
+  }
+
+  printf("\nStarting tests\n");
+
+  printf("\nTesting get_user_directory()\n");
+  test_get_user_directory();
+
+  printf("\nTesting get_job_directory()\n");
+  test_get_job_directory();
+
+  printf("\nTesting get_attempt_directory()\n");
+  test_get_attempt_directory();
+
+  printf("\nTesting get_task_launcher_file()\n");
+  test_get_task_launcher_file();
+
+  printf("\nTesting get_job_log_dir()\n");
+  test_get_job_log_dir();
+
+  test_check_configuration_permissions();
+
+  printf("\nTesting get_task_log_dir()\n");
+  test_get_task_log_dir();
+
+  printf("\nTesting delete_task()\n");
+  test_delete_task();
+
+  printf("\nTesting delete_job()\n");
+  test_delete_job();
+
+  test_delete_user();
+
+  test_check_user();
+
+  test_delete_log_directory();
+
+  // the tests that change user need to be run in a forked child process,
+  // so that when they change user they don't give up the parent's privileges
+  run_test_in_child("test_signal_task", test_signal_task);
+  run_test_in_child("test_signal_task_group", test_signal_task_group);
+
+  // init job and run task can't be run if you aren't testing as root
+  if (getuid() == 0) {
+    // these tests do internal forks so that the change_owner and execs
+    // don't mess up our process.
+    test_init_job();
+    test_run_task();
+  }
+
+  seteuid(0);
+  run("rm -fr " TEST_ROOT);
+  printf("\nFinished tests\n");
+
+  if (my_username) {
+    free(username);
+  }
+  free_configurations();
+  return 0;
+}

+ 0 - 243
src/c++/task-controller/tests/test-task-controller.c

@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include "../task-controller.h"
-
-#define HADOOP_CONF_DIR "/tmp"
-
-int write_config_file(char *file_name) {
-  FILE *file;
-  char const *str =
-      "mapred.local.dir=/tmp/testing1,/tmp/testing2,/tmp/testing3,/tmp/testing4\n";
-
-  file = fopen(file_name, "w");
-  if (file == NULL) {
-    printf("Failed to open %s.\n", file_name);
-    return EXIT_FAILURE;
-  }
-  fwrite(str, 1, strlen(str), file);
-  fclose(file);
-  return 0;
-}
-
-void test_check_variable_against_config() {
-
-  // A temporary configuration directory
-  char *conf_dir_templ = "/tmp/test-task-controller-conf-dir-XXXXXX";
-
-  // To accomodate "/conf/taskcontroller.cfg"
-  char template[strlen(conf_dir_templ) + strlen("/conf/taskcontroller.cfg")];
-
-  strcpy(template, conf_dir_templ);
-  char *temp_dir = mkdtemp(template);
-  if (temp_dir == NULL) {
-    printf("Couldn't create a temporary dir for conf.\n");
-    goto cleanup;
-  }
-
-  // Set the configuration directory
-  hadoop_conf_dir = strdup(temp_dir);
-
-  // create the configuration directory
-  strcat(template, "/conf");
-  char *conf_dir = strdup(template);
-  mkdir(conf_dir, S_IRWXU);
-
-  // create the configuration file
-  strcat(template, "/taskcontroller.cfg");
-  if (write_config_file(template) != 0) {
-    printf("Couldn't write the configuration file.\n");
-    goto cleanup;
-  }
-
-  // Test obtaining a value for a key from the config
-  char *config_values[4] = { "/tmp/testing1", "/tmp/testing2",
-      "/tmp/testing3", "/tmp/testing4" };
-  char *value = (char *) get_value("mapred.local.dir");
-  if (strcmp(value, "/tmp/testing1,/tmp/testing2,/tmp/testing3,/tmp/testing4")
-      != 0) {
-    printf("Obtaining a value for a key from the config failed.\n");
-    goto cleanup;
-  }
-
-  // Test the parsing of a multiple valued key from the config
-  char **values = (char **) get_values("mapred.local.dir");
-  char **values_ptr = values;
-  int i = 0;
-  while (*values_ptr != NULL) {
-    printf(" value : %s\n", *values_ptr);
-    if (strcmp(*values_ptr, config_values[i++]) != 0) {
-      printf("Configured values are not read out properly. Test failed!");
-      goto cleanup;;
-    }
-    values_ptr++;
-  }
-
-  if (check_variable_against_config("mapred.local.dir", "/tmp/testing5") == 0) {
-    printf("Configuration should not contain /tmp/testing5! \n");
-    goto cleanup;
-  }
-
-  if (check_variable_against_config("mapred.local.dir", "/tmp/testing4") != 0) {
-    printf("Configuration should contain /tmp/testing4! \n");
-    goto cleanup;
-  }
-
-  cleanup: if (value != NULL) {
-    free(value);
-  }
-  if (values != NULL) {
-    free(values);
-  }
-  if (hadoop_conf_dir != NULL) {
-    free(hadoop_conf_dir);
-  }
-  unlink(template);
-  rmdir(conf_dir);
-  rmdir(hadoop_conf_dir);
-}
-
-void test_get_user_directory() {
-  char *user_dir = (char *) get_user_directory("/tmp", "user");
-  printf("user_dir obtained is %s\n", user_dir);
-  int ret = 0;
-  if (strcmp(user_dir, "/tmp/taskTracker/user") != 0) {
-    ret = -1;
-  }
-  free(user_dir);
-  assert(ret == 0);
-}
-
-void test_get_job_directory() {
-  char *job_dir = (char *) get_job_directory("/tmp", "user",
-      "job_200906101234_0001");
-  printf("job_dir obtained is %s\n", job_dir);
-  int ret = 0;
-  if (strcmp(job_dir, "/tmp/taskTracker/user/jobcache/job_200906101234_0001")
-      != 0) {
-    ret = -1;
-  }
-  free(job_dir);
-  assert(ret == 0);
-}
-
-void test_get_attempt_directory() {
-  char *job_dir = (char *) get_job_directory("/tmp", "user",
-      "job_200906101234_0001");
-  printf("job_dir obtained is %s\n", job_dir);
-  char *attempt_dir = (char *) get_attempt_directory(job_dir,
-      "attempt_200906101234_0001_m_000000_0");
-  printf("attempt_dir obtained is %s\n", attempt_dir);
-  int ret = 0;
-  if (strcmp(
-      attempt_dir,
-      "/tmp/taskTracker/user/jobcache/job_200906101234_0001/attempt_200906101234_0001_m_000000_0")
-      != 0) {
-    ret = -1;
-  }
-  free(job_dir);
-  free(attempt_dir);
-  assert(ret == 0);
-}
-
-void test_get_task_launcher_file() {
-  char *job_dir = (char *) get_job_directory("/tmp", "user",
-      "job_200906101234_0001");
-  char *task_file = (char *) get_task_launcher_file(job_dir,
-      "attempt_200906112028_0001_m_000000_0");
-  printf("task_file obtained is %s\n", task_file);
-  int ret = 0;
-  if (strcmp(
-      task_file,
-      "/tmp/taskTracker/user/jobcache/job_200906101234_0001/attempt_200906112028_0001_m_000000_0/taskjvm.sh")
-      != 0) {
-    ret = -1;
-  }
-  free(task_file);
-  assert(ret == 0);
-}
-
-void test_get_job_log_dir() {
-  char *logdir = (char *) get_job_log_dir("/tmp/testing",
-    "job_200906101234_0001");
-  printf("logdir obtained is %s\n", logdir);
-  int ret = 0;
-  if (strcmp(logdir, "/tmp/testing/userlogs/job_200906101234_0001") != 0) {
-    ret = -1;
-  }
-  free(logdir);
-  assert(ret == 0);
-}
-
-void test_get_job_acls_file() {
-  char *job_acls_file = (char *) get_job_acls_file(
-    "/tmp/testing/userlogs/job_200906101234_0001");
-  printf("job acls file obtained is %s\n", job_acls_file);
-  int ret = 0;
-  if (strcmp(job_acls_file,
-    "/tmp/testing/userlogs/job_200906101234_0001/job-acls.xml") != 0) {
-    ret = -1;
-  }
-  free(job_acls_file);
-  assert(ret == 0);
-}
-
-void test_get_task_log_dir() {
-  char *logdir = (char *) get_task_log_dir("/tmp/testing",
-    "job_200906101234_0001", "attempt_200906112028_0001_m_000000_0");
-  printf("logdir obtained is %s\n", logdir);
-  int ret = 0;
-  if (strcmp(logdir,
-      "/tmp/testing/userlogs/job_200906101234_0001/attempt_200906112028_0001_m_000000_0")
-      != 0) {
-    ret = -1;
-  }
-  free(logdir);
-  assert(ret == 0);
-}
-
-int main(int argc, char **argv) {
-  printf("\nStarting tests\n");
-  LOGFILE = stdout;
-
-  printf("\nTesting check_variable_against_config()\n");
-  test_check_variable_against_config();
-
-  printf("\nTesting get_user_directory()\n");
-  test_get_user_directory();
-
-  printf("\nTesting get_job_directory()\n");
-  test_get_job_directory();
-
-  printf("\nTesting get_attempt_directory()\n");
-  test_get_attempt_directory();
-
-  printf("\nTesting get_task_launcher_file()\n");
-  test_get_task_launcher_file();
-
-  printf("\nTesting get_job_log_dir()\n");
-  test_get_job_log_dir();
-
-  printf("\nTesting get_job_acls_file()\n");
-  test_get_job_acls_file();
-
-  printf("\nTesting get_task_log_dir()\n");
-  test_get_task_log_dir();
-
-  printf("\nFinished tests\n");
-  return 0;
-}

+ 89 - 12
src/core/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -142,6 +142,23 @@ public class LocalDirAllocator {
     return context.getLocalPathToRead(pathStr, conf);
   }
 
+  /**
+   * Get all of the paths that currently exist in the working directories.
+   * @param pathStr the path underneath the roots
+   * @param conf the configuration to look up the roots in
+   * @return all of the paths that exist under any of the roots
+   * @throws IOException
+   */
+  public Iterable<Path> getAllLocalPathsToRead(String pathStr, 
+                                               Configuration conf
+                                               ) throws IOException {
+    AllocatorPerContext context;
+    synchronized (this) {
+      context = obtainContext(contextCfgItemName);
+    }
+    return context.getAllLocalPathsToRead(pathStr, conf);    
+  }
+
   /** Creates a temporary file in the local FS. Pass size as -1 if not known 
    *  apriori. We round-robin over the set of disks (via the configured dirs) 
    *  and select the first complete path which has enough space. A file is
@@ -210,7 +227,8 @@ public class LocalDirAllocator {
     /** This method gets called everytime before any read/write to make sure
      * that any change to localDirs is reflected immediately.
      */
-    private void confChanged(Configuration conf) throws IOException {
+    private synchronized void confChanged(Configuration conf
+                                          ) throws IOException {
       String newLocalDirs = conf.get(contextCfgItemName);
       if (!newLocalDirs.equals(savedLocalDirs)) {
         localDirs = conf.getStrings(contextCfgItemName);
@@ -270,17 +288,6 @@ public class LocalDirAllocator {
       return dirNumLastAccessed;
     }
     
-    /** Get a path from the local FS. This method should be used if the size of 
-     *  the file is not known a priori. 
-     *  
-     *  It will use roulette selection, picking directories
-     *  with probability proportional to their available space. 
-     */
-    public synchronized Path getLocalPathForWrite(String path, 
-        Configuration conf) throws IOException {
-      return getLocalPathForWrite(path, SIZE_UNKNOWN, conf);
-    }
-
     /** Get a path from the local FS. If size is known, we go
      *  round-robin over the set of disks (via the configured dirs) and return
      *  the first complete path which has enough space.
@@ -395,6 +402,76 @@ public class LocalDirAllocator {
       " the configured local directories");
     }
 
+    private static 
+    class PathIterator implements Iterator<Path>, Iterable<Path> {
+      private final FileSystem fs;
+      private final String pathStr;
+      private int i = 0;
+      private final String[] rootDirs;
+      private Path next = null;
+
+      private PathIterator(FileSystem fs, String pathStr, String[] rootDirs
+                           ) throws IOException {
+        this.fs = fs;
+        this.pathStr = pathStr;
+        this.rootDirs = rootDirs;
+        advance();
+      }
+
+      @Override
+      public boolean hasNext() {
+        return next != null;
+      }
+
+      private void advance() throws IOException {
+        while (i < rootDirs.length) {
+          next = new Path(rootDirs[i++], pathStr);
+          if (fs.exists(next)) {
+            return;
+          }
+        }
+        next = null;
+      }
+
+      @Override
+      public Path next() {
+        Path result = next;
+        try {
+          advance();
+        } catch (IOException ie) {
+          throw new RuntimeException("Can't check existance of " + next, ie);
+        }
+        return result;
+      }
+
+      @Override
+      public void remove() {
+        throw new UnsupportedOperationException("read only iterator");
+      }
+
+      @Override
+      public Iterator<Path> iterator() {
+        return this;
+      }
+    }
+
+    /**
+     * Get all of the paths that currently exist in the working directories.
+     * @param pathStr the path underneath the roots
+     * @param conf the configuration to look up the roots in
+     * @return all of the paths that exist under any of the roots
+     * @throws IOException
+     */
+    synchronized Iterable<Path> getAllLocalPathsToRead(String pathStr,
+                                                       Configuration conf
+                                                       ) throws IOException {
+      confChanged(conf);
+      if (pathStr.startsWith("/")) {
+        pathStr = pathStr.substring(1);
+      }
+      return new PathIterator(localFS, pathStr, localDirs);
+    }
+
     /** We search through all the configured dirs for the file's existence
      *  and return true when we find one 
      */

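A short usage sketch may make the new iterator clearer; it is illustrative only. The configuration key is the real mapred.local.dir property, but the directory values, the class name, and the relative path are hypothetical.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;

public class ListLocalPaths {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Hypothetical roots; a TaskTracker gets these from its own config.
    conf.set("mapred.local.dir", "/tmp/local-1,/tmp/local-2");
    LocalDirAllocator alloc = new LocalDirAllocator("mapred.local.dir");
    // The iterator is lazy: it probes each root in turn and yields a path
    // only for the roots where the relative path actually exists.
    for (Path p : alloc.getAllLocalPathsToRead("taskTracker/bob/jobcache", conf)) {
      System.out.println("found copy: " + p);
    }
  }
}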
+ 33 - 128
src/core/org/apache/hadoop/util/ProcessTree.java

@@ -33,8 +33,20 @@ import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 public class ProcessTree {
 
   private static final Log LOG = LogFactory.getLog(ProcessTree.class);
-
-  public static final long DEFAULT_SLEEPTIME_BEFORE_SIGKILL = 5000L;
+  
+  /**
+   * The constants for the signals.
+   */
+  public static enum Signal {
+    QUIT(3), KILL(9), TERM(15);
+    private int value;
+    private Signal(int value) {
+      this.value = value;
+    }
+    public int getValue() {
+      return value;
+    }
+  }
 
   public static final boolean isSetsidAvailable = isSetsidSupported();
   private static boolean isSetsidSupported() {
@@ -49,95 +61,8 @@ public class ProcessTree {
       setsidSupported = false;
     } finally { // handle the exit code
       LOG.info("setsid exited with exit code " + shexec.getExitCode());
-      return setsidSupported;
-    }
-  }
-
-  /**
-   * Destroy the process-tree.
-   * @param pid process id of the root process of the subtree of processes
-   *            to be killed
-   * @param sleeptimeBeforeSigkill The time to wait before sending SIGKILL
-   *                               after sending SIGTERM
-   * @param isProcessGroup pid is a process group leader or not
-   * @param inBackground Process is to be killed in the back ground with
-   *                     a separate thread
-   */
-  public static void destroy(String pid, long sleeptimeBeforeSigkill,
-                             boolean isProcessGroup, boolean inBackground) {
-    if(isProcessGroup) {
-      destroyProcessGroup(pid, sleeptimeBeforeSigkill, inBackground);
-    }
-    else {
-      //TODO: Destroy all the processes in the subtree in this case also.
-      // For the time being, killing only the root process.
-      destroyProcess(pid, sleeptimeBeforeSigkill, inBackground);
-    }
-  }
-
-  /** Destroy the process.
-   * @param pid Process id of to-be-killed-process
-   * @param sleeptimeBeforeSigkill The time to wait before sending SIGKILL
-   *                               after sending SIGTERM
-   * @param inBackground Process is to be killed in the back ground with
-   *                     a separate thread
-   */
-  protected static void destroyProcess(String pid, long sleeptimeBeforeSigkill,
-                                    boolean inBackground) {
-    terminateProcess(pid);
-    sigKill(pid, false, sleeptimeBeforeSigkill, inBackground);
-  }
-
-  /** Destroy the process group.
-   * @param pgrpId Process group id of to-be-killed-processes
-   * @param sleeptimeBeforeSigkill The time to wait before sending SIGKILL
-   *                               after sending SIGTERM
-   * @param inBackground Process group is to be killed in the back ground with
-   *                     a separate thread
-   */
-  protected static void destroyProcessGroup(String pgrpId,
-                       long sleeptimeBeforeSigkill, boolean inBackground) {
-    terminateProcessGroup(pgrpId);
-    sigKill(pgrpId, true, sleeptimeBeforeSigkill, inBackground);
-  }
-
-  /**
-   * Sends terminate signal to the process, allowing it to gracefully exit.
-   * 
-   * @param pid pid of the process to be sent SIGTERM
-   */
-  public static void terminateProcess(String pid) {
-    ShellCommandExecutor shexec = null;
-    try {
-      String[] args = { "kill", pid };
-      shexec = new ShellCommandExecutor(args);
-      shexec.execute();
-    } catch (IOException ioe) {
-      LOG.warn("Error executing shell command " + ioe);
-    } finally {
-      LOG.info("Killing process " + pid +
-               " with SIGTERM. Exit code " + shexec.getExitCode());
-    }
-  }
-
-  /**
-   * Sends terminate signal to all the process belonging to the passed process
-   * group, allowing the group to gracefully exit.
-   * 
-   * @param pgrpId process group id
-   */
-  public static void terminateProcessGroup(String pgrpId) {
-    ShellCommandExecutor shexec = null;
-    try {
-      String[] args = { "kill", "--", "-" + pgrpId };
-      shexec = new ShellCommandExecutor(args);
-      shexec.execute();
-    } catch (IOException ioe) {
-      LOG.warn("Error executing shell command " + ioe);
-    } finally {
-      LOG.info("Killing all processes in the process group " + pgrpId +
-               " with SIGTERM. Exit code " + shexec.getExitCode());
     }
+    return setsidSupported;
   }
 
   /**
@@ -160,83 +85,63 @@ public class ProcessTree {
         LOG.warn("Thread sleep is interrupted.");
       }
       if(isProcessGroup) {
-        killProcessGroup(pid);
+        killProcessGroup(pid, Signal.KILL);
       } else {
-        killProcess(pid);
+        killProcess(pid, Signal.KILL);
       }
     }  
   }
   
-
-  /** Kills the process(OR process group) by sending the signal SIGKILL
-   * @param pid Process id(OR process group id) of to-be-deleted-process
-   * @param isProcessGroup Is pid a process group id of to-be-deleted-processes
-   * @param sleeptimeBeforeSigkill The time to wait before sending SIGKILL
-   *                               after sending SIGTERM
-   * @param inBackground Process is to be killed in the back ground with
-   *                     a separate thread
-   */
-  private static void sigKill(String pid, boolean isProcessGroup,
-                        long sleeptimeBeforeSigkill, boolean inBackground) {
-
-    if(inBackground) { // use a separate thread for killing
-      SigKillThread sigKillThread = new SigKillThread(pid, isProcessGroup,
-                                                      sleeptimeBeforeSigkill);
-      sigKillThread.setDaemon(true);
-      sigKillThread.start();
-    }
-    else {
-      sigKillInCurrentThread(pid, isProcessGroup, sleeptimeBeforeSigkill);
-    }
-  }
-
   /**
-   * Sends kill signal to process, forcefully terminating the process.
+   * Sends the given signal to the process.
    * 
    * @param pid process id
+   * @param signal the signal to send
    */
-  public static void killProcess(String pid) {
+  public static void killProcess(String pid, Signal signal) {
 
     //If process tree is not alive then return immediately.
     if(!ProcessTree.isAlive(pid)) {
       return;
     }
-    String[] args = { "kill", "-9", pid };
+    String[] args = { "kill", "-" + signal.getValue(), pid };
     ShellCommandExecutor shexec = new ShellCommandExecutor(args);
     try {
       shexec.execute();
     } catch (IOException e) {
-      LOG.warn("Error sending SIGKILL to process "+ pid + " ."+ 
+      LOG.warn("Error sending signal " + signal + " to process "+ pid + " ."+ 
           StringUtils.stringifyException(e));
     } finally {
-      LOG.info("Killing process " + pid + " with SIGKILL. Exit code "
-          + shexec.getExitCode());
+      LOG.info("Killing process " + pid + " with signal " + signal + 
+               ". Exit code " + shexec.getExitCode());
     }
   }
 
   /**
-   * Sends kill signal to all process belonging to same process group,
+   * Sends the given signal to all processes belonging to the same process group,
    * forcefully terminating the process group.
    * 
    * @param pgrpId process group id
+   * @param signal the signal to send
    */
-  public static void killProcessGroup(String pgrpId) {
+  public static void killProcessGroup(String pgrpId, Signal signal) {
 
     //If process tree is not alive then return immediately.
     if(!ProcessTree.isProcessGroupAlive(pgrpId)) {
       return;
     }
 
-    String[] args = { "kill", "-9", "-"+pgrpId };
+    String[] args = { "kill", "-" + signal.getValue() , "-"+pgrpId };
     ShellCommandExecutor shexec = new ShellCommandExecutor(args);
     try {
       shexec.execute();
     } catch (IOException e) {
-      LOG.warn("Error sending SIGKILL to process group "+ pgrpId + " ."+ 
+      LOG.warn("Error sending signal " + signal + " to process group "+ 
+               pgrpId + " ."+ 
           StringUtils.stringifyException(e));
     } finally {
-      LOG.info("Killing process group" + pgrpId + " with SIGKILL. Exit code "
-          + shexec.getExitCode());
+      LOG.info("Killing process group" + pgrpId + " with signal " + signal + 
+               ". Exit code " + shexec.getExitCode());
     }
   }
   
@@ -297,7 +202,7 @@ public class ProcessTree {
     private String pid = null;
     private boolean isProcessGroup = false;
 
-    private long sleepTimeBeforeSigKill = DEFAULT_SLEEPTIME_BEFORE_SIGKILL;
+    private final long sleepTimeBeforeSigKill;
 
     private SigKillThread(String pid, boolean isProcessGroup, long interval) {
       this.pid = pid;

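The net effect of the Signal enum is that callers name the signal instead of baking "-9" into the argv. A minimal sketch, with a made-up pid and class name:

import org.apache.hadoop.util.ProcessTree;

public class SignalSketch {
  public static void main(String[] args) {
    String pid = "4231"; // hypothetical task pid
    // SIGQUIT asks a JVM for a thread dump; SIGKILL then takes down the
    // whole process group. Both helpers no-op if the target is not alive.
    ProcessTree.killProcess(pid, ProcessTree.Signal.QUIT);
    ProcessTree.killProcessGroup(pid, ProcessTree.Signal.KILL);
  }
}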
+ 2 - 169
src/core/org/apache/hadoop/util/ProcfsBasedProcessTree.java

@@ -29,15 +29,11 @@ import java.util.Map;
 import java.util.HashMap;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-import java.util.Arrays;
 import java.util.LinkedList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
-import org.apache.hadoop.util.Shell.ExitCodeException;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-
 /**
  * A Proc file-system based ProcessTree. Works only on Linux.
  */
@@ -47,8 +43,6 @@ public class ProcfsBasedProcessTree extends ProcessTree {
       .getLog(ProcfsBasedProcessTree.class);
 
   private static final String PROCFS = "/proc/";
-  public static final long DEFAULT_SLEEPTIME_BEFORE_SIGKILL = 5000L;
-  private long sleepTimeBeforeSigKill = DEFAULT_SLEEPTIME_BEFORE_SIGKILL;
   private static final Pattern PROCFS_STAT_FILE_FORMAT = Pattern
       .compile("^([0-9-]+)\\s([^\\s]+)\\s[^\\s]\\s([0-9-]+)\\s([0-9-]+)\\s([0-9-]+)\\s([0-9-]+\\s){16}([0-9]+)(\\s[0-9-]+){16}");
 
@@ -61,21 +55,14 @@ public class ProcfsBasedProcessTree extends ProcessTree {
   
   private Integer pid = -1;
 
-  private boolean setsidUsed = false;
-  
-  private long sleeptimeBeforeSigkill = DEFAULT_SLEEPTIME_BEFORE_SIGKILL;
-  
   private Map<Integer, ProcessInfo> processTree = new HashMap<Integer, ProcessInfo>();
 
   public ProcfsBasedProcessTree(String pid) {
-    this(pid, false, DEFAULT_SLEEPTIME_BEFORE_SIGKILL);
+    this(pid, false);
   }
   
-  public ProcfsBasedProcessTree(String pid, boolean setsidUsed,
-      long sigkillInterval) {
+  public ProcfsBasedProcessTree(String pid, boolean setsidUsed) {
     this(pid,PROCFS);
-    this.setsidUsed = setsidUsed;
-    sleeptimeBeforeSigkill = sigkillInterval; 
   }
 
   public ProcfsBasedProcessTree(String pid, String procfsDir) {
@@ -83,17 +70,6 @@ public class ProcfsBasedProcessTree extends ProcessTree {
     this.procfsDir = procfsDir;
   }
   
-  /**
-   * Sets SIGKILL interval
-   * @deprecated Use {@link ProcfsBasedProcessTree#ProcfsBasedProcessTree(
-   *                  String, boolean, long)} instead
-   * @param interval The time to wait before sending SIGKILL
-   *                 after sending SIGTERM
-   */
-  public void setSigKillInterval(long interval) {
-    sleepTimeBeforeSigKill = interval;
-  }
-
   /**
    * Checks if the ProcfsBasedProcessTree is available on this system.
    * 
@@ -218,81 +194,6 @@ public class ProcfsBasedProcessTree extends ProcessTree {
     return false;
   }
 
-  /** Verify that the given process id is same as its process group id.
-   * @param pidStr Process id of the to-be-verified-process
-   */
-  private static boolean assertPidPgrpidForMatch(String pidStr) {
-    Integer pId = Integer.parseInt(pidStr);
-    // Get information for this process
-    ProcessInfo pInfo = new ProcessInfo(pId);
-    pInfo = constructProcessInfo(pInfo);
-    //make sure that pId and its pgrpId match
-    if (!pInfo.getPgrpId().equals(pId)) {
-      LOG.warn("Unexpected: Process with PID " + pId +
-               " is not a process group leader.");
-      return false;
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug(pId + " is a process group leader, as expected.");
-    }
-    return true;
-  }
-
-  /** Make sure that the given pid is a process group leader and then
-   * destroy the process group.
-   * @param pgrpId   Process group id of to-be-killed-processes
-   * @param interval The time to wait before sending SIGKILL
-   *                 after sending SIGTERM
-   * @param inBackground Process is to be killed in the back ground with
-   *                     a separate thread
-   */
-  public static void assertAndDestroyProcessGroup(String pgrpId, long interval,
-                       boolean inBackground)
-         throws IOException {
-    // Make sure that the pid given is a process group leader
-    if (!assertPidPgrpidForMatch(pgrpId)) {
-      throw new IOException("Process with PID " + pgrpId  +
-                          " is not a process group leader.");
-    }
-    destroyProcessGroup(pgrpId, interval, inBackground);
-  }
-
-  /**
-   * Destroy the process-tree.
-   */
-  public void destroy() {
-    destroy(true);
-  }
-  
-  /**
-   * Destroy the process-tree.
-   * @param inBackground Process is to be killed in the back ground with
-   *                     a separate thread
-   */
-  public void destroy(boolean inBackground) {
-    LOG.debug("Killing ProcfsBasedProcessTree of " + pid);
-    if (pid == -1) {
-      return;
-    }
-    if (isAlive(pid.toString())) {
-      if (isSetsidAvailable && setsidUsed) {
-        // In this case, we know that pid got created using setsid. So kill the
-        // whole processGroup.
-        try {
-          assertAndDestroyProcessGroup(pid.toString(), sleeptimeBeforeSigkill,
-                              inBackground);
-        } catch (IOException e) {
-          LOG.warn(StringUtils.stringifyException(e));
-        }
-      }
-      else {
-        //TODO: Destroy all the processes in the subtree in this case also.
-        // For the time being, killing only the root process.
-        destroyProcess(pid.toString(), sleeptimeBeforeSigkill, inBackground);
-      }
-    }
-  }
-
   private static final String PROCESSTREE_DUMP_FORMAT =
       "\t|- %d %d %d %d %s %d %s\n";
 
@@ -382,15 +283,6 @@ public class ProcfsBasedProcessTree extends ProcessTree {
     return processList;
   }
 
-  /**
-   * 
-   * Construct the ProcessInfo using the process' PID and procfs and return the
-   * same. Returns null on failing to read from procfs,
-   */
-  private static ProcessInfo constructProcessInfo(ProcessInfo pinfo) {
-    return constructProcessInfo(pinfo, PROCFS);
-  }
-
   /**
    * Construct the ProcessInfo using the process' PID and procfs rooted at the
    * specified directory and return the same. It is provided mainly to assist
@@ -448,58 +340,6 @@ public class ProcfsBasedProcessTree extends ProcessTree {
     return ret;
   }
   
-  /**
-   * Is the process with PID pid still alive?
-   */
-  private boolean isAlive(Integer pid) {
-    // This method assumes that isAlive is called on a pid that was alive not
-    // too long ago, and hence assumes no chance of pid-wrapping-around.
-    ShellCommandExecutor shexec = null;
-    try {
-      String[] args = { "kill", "-0", pid.toString() };
-      shexec = new ShellCommandExecutor(args);
-      shexec.execute();
-    } catch (ExitCodeException ee) {
-      return false;
-    } catch (IOException ioe) {
-      LOG.warn("Error executing shell command "
-          + Arrays.toString(shexec.getExecString()) + ioe);
-      return false;
-    }
-    return (shexec.getExitCode() == 0 ? true : false);
-  }
-
-  /**
-   * Helper thread class that kills process-tree with SIGKILL in background
-   */
-  private class SigKillThread extends Thread {
-
-    public void run() {
-      this.setName(this.getClass().getName() + "-" + String.valueOf(pid));
-      ShellCommandExecutor shexec = null;
-
-      try {
-        // Sleep for some time before sending SIGKILL
-        Thread.sleep(sleepTimeBeforeSigKill);
-      } catch (InterruptedException i) {
-        LOG.warn("Thread sleep is interrupted.");
-      }
-
-      // Kill the root process with SIGKILL if it is still alive
-      if (ProcfsBasedProcessTree.this.isAlive(pid)) {
-        try {
-          String[] args = { "kill", "-9", pid.toString() };
-          shexec = new ShellCommandExecutor(args);
-          shexec.execute();
-        } catch (IOException ioe) {
-          LOG.warn("Error executing shell command " + ioe);
-        } finally {
-          LOG.info("Killing " + pid + " with SIGKILL. Exit code "
-              + shexec.getExitCode());
-        }
-      }
-    }
-  }
   /**
    * Returns a string printing PIDs of process present in the
    * ProcfsBasedProcessTree. Output format : [pid pid ..]
@@ -563,13 +403,6 @@ public class ProcfsBasedProcessTree extends ProcessTree {
       return age;
     }
     
-    public boolean isParent(ProcessInfo p) {
-      if (pid.equals(p.getPpid())) {
-        return true;
-      }
-      return false;
-    }
-
     public void updateProcessInfo(String name, Integer ppid, Integer pgrpId,
         Integer sessionId, Long vmem) {
       this.name = name;

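With the destroy/SIGKILL machinery moved out of this class, construction reduces to the pid and the setsid flag. A sketch, assuming the pre-existing getProcessTree() accessor (untouched by this patch) and a made-up pid:

import org.apache.hadoop.util.ProcfsBasedProcessTree;

public class TreeSketch {
  public static void main(String[] args) {
    // The sigkillInterval constructor argument is gone along with the
    // SigKillThread; only the pid and whether setsid was used remain.
    ProcfsBasedProcessTree tree = new ProcfsBasedProcessTree("4231", true);
    tree = tree.getProcessTree(); // walk /proc and snapshot the tree
    System.out.println(tree);     // prints "[ pid pid .. ]"
  }
}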
+ 65 - 214
src/mapred/org/apache/hadoop/filecache/DistributedCache.java

@@ -21,13 +21,9 @@ package org.apache.hadoop.filecache;
 import java.io.*;
 import java.util.*;
 import org.apache.hadoop.conf.*;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.mapred.DefaultTaskController;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.JobContext;
 
 import java.net.URI;
 
@@ -176,179 +172,8 @@ public class DistributedCache {
   public static final String CACHE_SYMLINK = "mapred.create.symlink";
   
   /**
-   * Get the locally cached file or archive; it could either be 
-   * previously cached (and valid) or copy it from the {@link FileSystem} now.
-   * 
-   * @param cache the cache to be localized, this should be specified as 
-   * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema 
-   * or hostname:port is provided the file is assumed to be in the filesystem
-   * being used in the Configuration
-   * @param conf The Confguration file which contains the filesystem
-   * @param baseDir The base cache Dir where you wnat to localize the files/archives
-   * @param fileStatus The file status on the dfs.
-   * @param isArchive if the cache is an archive or a file. In case it is an
-   *  archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
-   *  be unzipped/unjarred/untarred automatically 
-   *  and the directory where the archive is unzipped/unjarred/untarred is
-   *  returned as the Path.
-   *  In case of a file, the path to the file is returned
-   * @param confFileStamp this is the hdfs file modification timestamp to verify that the 
-   * file to be cached hasn't changed since the job started
-   * @param currentWorkDir this is the directory where you would want to create symlinks 
-   * for the locally cached files/archives
-   * @return the path to directory where the archives are unjarred in case of archives,
-   * the path to the file where the file is copied locally 
-   * @throws IOException
-   * @deprecated Internal to MapReduce framework.  Use DistributedCacheManager
-   * instead.
-   */
-  public static Path getLocalCache(URI cache, Configuration conf, 
-                                   Path baseDir, FileStatus fileStatus,
-                                   boolean isArchive, long confFileStamp,
-                                   Path currentWorkDir) 
-      throws IOException {
-    return getLocalCache(cache, conf, baseDir, fileStatus, isArchive, 
-        confFileStamp, currentWorkDir, true);
-  }
-
-  /**
-   * Get the locally cached file or archive; it could either be 
-   * previously cached (and valid) or copy it from the {@link FileSystem} now.
-   * 
-   * @param cache the cache to be localized, this should be specified as 
-   * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema 
-   * or hostname:port is provided the file is assumed to be in the filesystem
-   * being used in the Configuration
-   * @param conf The Confguration file which contains the filesystem
-   * @param baseDir The base cache Dir where you wnat to localize the files/archives
-   * @param fileStatus The file status on the dfs.
-   * @param isArchive if the cache is an archive or a file. In case it is an
-   *  archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will
-   *  be unzipped/unjarred/untarred automatically 
-   *  and the directory where the archive is unzipped/unjarred/untarred is
-   *  returned as the Path.
-   *  In case of a file, the path to the file is returned
-   * @param confFileStamp this is the hdfs file modification timestamp to verify that the 
-   * file to be cached hasn't changed since the job started
-   * @param currentWorkDir this is the directory where you would want to create symlinks 
-   * for the locally cached files/archives
-   * @param honorSymLinkConf if this is false, then the symlinks are not
-   * created even if conf says so (this is required for an optimization in task
-   * launches
-   * @return the path to directory where the archives are unjarred in case of archives,
-   * the path to the file where the file is copied locally 
-   * @throws IOException
-   * @deprecated Internal to MapReduce framework.  Use DistributedCacheManager
-   * instead.
-   */
-  public static Path getLocalCache(URI cache, Configuration conf, 
-      Path baseDir, FileStatus fileStatus,
-      boolean isArchive, long confFileStamp,
-      Path currentWorkDir, boolean honorSymLinkConf) throws IOException {
-
-    return new TrackerDistributedCacheManager(conf, new DefaultTaskController())
-        .getLocalCache(cache, conf, baseDir.toString(), fileStatus, isArchive,
-            confFileStamp, currentWorkDir, honorSymLinkConf, false);
-  }
-
-  /**
-   * Get the locally cached file or archive; it could either be 
-   * previously cached (and valid) or copy it from the {@link FileSystem} now.
-   * 
-   * @param cache the cache to be localized, this should be specified as 
-   * new URI(hdfs://hostname:port/absolute_path_to_file#LINKNAME). If no schema 
-   * or hostname:port is provided the file is assumed to be in the filesystem
-   * being used in the Configuration
-   * @param conf The Confguration file which contains the filesystem
-   * @param baseDir The base cache Dir where you wnat to localize the files/archives
-   * @param isArchive if the cache is an archive or a file. In case it is an 
-   *  archive with a .zip or .jar or .tar or .tgz or .tar.gz extension it will 
-   *  be unzipped/unjarred/untarred automatically 
-   *  and the directory where the archive is unzipped/unjarred/untarred 
-   *  is returned as the Path.
-   *  In case of a file, the path to the file is returned
-   * @param confFileStamp this is the hdfs file modification timestamp to verify that the 
-   * file to be cached hasn't changed since the job started
-   * @param currentWorkDir this is the directory where you would want to create symlinks 
-   * for the locally cached files/archives
-   * @return the path to directory where the archives are unjarred in case of archives,
-   * the path to the file where the file is copied locally 
-   * @throws IOException
-   * @deprecated Internal to MapReduce framework.  Use DistributedCacheManager
-   * instead.
-   */
-  public static Path getLocalCache(URI cache, Configuration conf, 
-                                   Path baseDir, boolean isArchive,
-                                   long confFileStamp, Path currentWorkDir) 
-      throws IOException {
-    return getLocalCache(cache, conf, 
-                         baseDir, null, isArchive,
-                         confFileStamp, currentWorkDir);
-  }
-
-  /**
-   * This is the opposite of getlocalcache. When you are done with
-   * using the cache, you need to release the cache
-   * @param cache The cache URI to be released
-   * @param conf configuration which contains the filesystem the cache 
-   * is contained in.
-   * @throws IOException
-   * @deprecated Internal to MapReduce framework.  Use DistributedCacheManager
-   * instead.
-   */
-  public static void releaseCache(URI cache, Configuration conf)
-      throws IOException {
-	// find the timestamp of the uri
-    URI[] archives = DistributedCache.getCacheArchives(conf);
-    URI[] files = DistributedCache.getCacheFiles(conf);
-    String[] archivesTimestamps =
-          DistributedCache.getArchiveTimestamps(conf);
-    String[] filesTimestamps =
-          DistributedCache.getFileTimestamps(conf);
-    String timestamp = null;
-    if (archives != null) {
-      for (int i = 0; i < archives.length; i++) {
-        if (archives[i].equals(cache)) {
-          timestamp = archivesTimestamps[i];
-          break;
-        }
-      }
-    }
-    if (timestamp == null && files != null) {
-      for (int i = 0; i < files.length; i++) {
-        if (files[i].equals(cache)) {
-          timestamp = filesTimestamps[i];
-          break;
-        }
-      }
-    }
-    if (timestamp == null) {
-      throw new IOException("TimeStamp of the uri couldnot be found");
-    }
-    new TrackerDistributedCacheManager(conf, new DefaultTaskController())
-        .releaseCache(cache, conf, Long.parseLong(timestamp), 
-            TrackerDistributedCacheManager.getLocalizedCacheOwner(false));
-  }
-  
-  /**
-   * Returns the relative path of the dir this cache will be localized in
-   * relative path that this cache will be localized in. For
-   * hdfs://hostname:port/absolute_path -- the relative path is
-   * hostname/absolute path -- if it is just /absolute_path -- then the
-   * relative path is hostname of DFS this mapred cluster is running
-   * on/absolute_path
-   * @deprecated Internal to MapReduce framework.  Use DistributedCacheManager
-   * instead.
-   */
-  @Deprecated
-  public static String makeRelative(URI cache, Configuration conf)
-      throws IOException {
-    return new TrackerDistributedCacheManager(conf, new DefaultTaskController())
-        .makeRelative(cache, conf);
-  }
-
-  /**
-   * Returns {@link FileStatus} of a given cache file on hdfs.
+   * Returns {@link FileStatus} of a given cache file on hdfs. Internal to 
+   * MapReduce.
    * @param conf configuration
    * @param cache cache file 
    * @return <code>FileStatus</code> of a given cache file on hdfs
@@ -357,13 +182,11 @@ public class DistributedCache {
   public static FileStatus getFileStatus(Configuration conf, URI cache)
     throws IOException {
     FileSystem fileSystem = FileSystem.get(cache, conf);
-    Path filePath = new Path(cache.getPath());
-
-    return fileSystem.getFileStatus(filePath);
+    return fileSystem.getFileStatus(new Path(cache.getPath()));
   }
   
   /**
-   * Returns mtime of a given cache file on hdfs.
+   * Returns mtime of a given cache file on hdfs. Internal to MapReduce.
    * @param conf configuration
    * @param cache cache file 
    * @return mtime of a given cache file on hdfs
@@ -388,17 +211,6 @@ public class DistributedCache {
     TrackerDistributedCacheManager.createAllSymlink(conf, jobCacheDir, workDir);
   }
   
-  private static String getFileSysName(URI url) {
-    String fsname = url.getScheme();
-    if ("hdfs".equals(fsname)) {
-      String host = url.getHost();
-      int port = url.getPort();
-      return (port == (-1)) ? host : (host + ":" + port);
-    } else {
-      return null;
-    }
-  }
-
   /**
    * Set the configuration with the given set of archives. Intended
    * to be used by user code.
@@ -421,11 +233,22 @@ public class DistributedCache {
     conf.set(CACHE_FILES, sfiles);
   }
 
+  private static Path[] parsePaths(String[] strs) {
+    if (strs == null) {
+      return null;
+    }
+    Path[] result = new Path[strs.length];
+    for(int i=0; i < strs.length; ++i) {
+      result[i] = new Path(strs[i]);
+    }
+    return result;
+  }
+
   /**
    * Get cache archives set in the Configuration.  Used by
    * internal DistributedCache and MapReduce code.
    * @param conf The configuration which contains the archives
-   * @return A URI array of the caches set in the Configuration
+   * @return An array of the caches set in the Configuration
    * @throws IOException
    */
   public static URI[] getCacheArchives(Configuration conf) throws IOException {
@@ -436,7 +259,7 @@ public class DistributedCache {
    * Get cache files set in the Configuration.  Used by internal
    * DistributedCache and MapReduce code.
    * @param conf The configuration which contains the files
-   * @return A URI array of the files set in the Configuration
+   * @return An array of the files set in the Configuration
    * @throws IOException
    */
   public static URI[] getCacheFiles(Configuration conf) throws IOException {
@@ -468,27 +291,42 @@ public class DistributedCache {
     return StringUtils.stringToPath(conf.getStrings(CACHE_LOCALFILES));
   }
 
+  /**
+   * Parse a list of strings into longs.
+   * @param strs the list of strings to parse
+   * @return an array of the parsed longs, the same length as strs
+   */
+  private static long[] parseTimestamps(String[] strs) {
+    if (strs == null) {
+      return null;
+    }
+    long[] result = new long[strs.length];
+    for(int i=0; i < strs.length; ++i) {
+      result[i] = Long.parseLong(strs[i]);
+    }
+    return result;
+  }
+
   /**
    * Get the timestamps of the archives.  Used by internal
    * DistributedCache and MapReduce code.
    * @param conf The configuration which stored the timestamps
-   * @return a string array of timestamps 
+   * @return a long array of timestamps 
    * @throws IOException
    */
-  public static String[] getArchiveTimestamps(Configuration conf) {
-    return conf.getStrings(CACHE_ARCHIVES_TIMESTAMPS);
+  public static long[] getArchiveTimestamps(Configuration conf) {
+    return parseTimestamps(conf.getStrings(CACHE_ARCHIVES_TIMESTAMPS));
   }
 
-
   /**
    * Get the timestamps of the files.  Used by internal
    * DistributedCache and MapReduce code.
    * @param conf The configuration which stored the timestamps
-   * @return a string array of timestamps 
+   * @return a long array of timestamps 
    * @throws IOException
    */
-  public static String[] getFileTimestamps(Configuration conf) {
-    return conf.getStrings(CACHE_FILES_TIMESTAMPS);
+  public static long[] getFileTimestamps(Configuration conf) {
+    return parseTimestamps(conf.getStrings(CACHE_FILES_TIMESTAMPS));
   }
 
   /**
@@ -532,6 +370,30 @@ public class DistributedCache {
   public static void setLocalFiles(Configuration conf, String str) {
     conf.set(CACHE_LOCALFILES, str);
   }
+  
+  /**
+   * Add an archive that has been localized to the conf.  Used
+   * by internal DistributedCache code.
+   * @param conf The conf to modify to contain the localized caches
+   * @param str a comma separated list of local archives
+   */
+  public static void addLocalArchives(Configuration conf, String str) {
+    String archives = conf.get(CACHE_LOCALARCHIVES);
+    conf.set(CACHE_LOCALARCHIVES, archives == null ? str
+        : archives + "," + str);
+  }
+
+  /**
+   * Add a file that has been localized to the conf.  Used
+   * by internal DistributedCache code.
+   * @param conf The conf to modify to contain the localized caches
+   * @param str a comma separated list of local files
+   */
+  public static void addLocalFiles(Configuration conf, String str) {
+    String files = conf.get(CACHE_LOCALFILES);
+    conf.set(CACHE_LOCALFILES, files == null ? str
+        : files + "," + str);
+  }
 
   /**
    * Add a archives to be localized to the conf.  Intended to
@@ -606,7 +468,7 @@ public class DistributedCache {
     String classpath = conf.get("mapred.job.classpath.files");
     if (classpath == null)
       return null;
-    ArrayList list = Collections.list(new StringTokenizer(classpath, System
+    ArrayList<Object> list = Collections.list(new StringTokenizer(classpath, System
                                                           .getProperty("path.separator")));
     Path[] paths = new Path[list.size()];
     for (int i = 0; i < list.size(); i++) {
@@ -665,7 +527,7 @@ public class DistributedCache {
     String classpath = conf.get("mapred.job.classpath.archives");
     if (classpath == null)
       return null;
-    ArrayList list = Collections.list(new StringTokenizer(classpath, System
+    ArrayList<Object> list = Collections.list(new StringTokenizer(classpath, System
                                                           .getProperty("path.separator")));
     Path[] paths = new Path[list.size()];
     for (int i = 0; i < list.size(); i++) {
@@ -747,15 +609,4 @@ public class DistributedCache {
     return true;
   }
 
-  /**
-   * Clear the entire contents of the cache and delete the backing files. This
-   * should only be used when the server is reinitializing, because the users
-   * are going to lose their files.
-   * @deprecated Internal to MapReduce framework.  Use DistributedCacheManager
-   * instead.
-   */
-  public static void purgeCache(Configuration conf) throws IOException {
-    new TrackerDistributedCacheManager(conf, new DefaultTaskController())
-        .purgeCache();
-  }
 }

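Two behavioral points in this file are easy to miss in the diff: the timestamp getters now return long[] instead of String[], and the new addLocal* helpers append to any list already in the conf rather than replacing it. A sketch; the class name, paths, timestamp values, and the timestamp property name are assumptions for illustration:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;

public class CacheConfSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // addLocalFiles accumulates a comma-separated list
    DistributedCache.addLocalFiles(conf, "/tmp/cache/a.txt");
    DistributedCache.addLocalFiles(conf, "/tmp/cache/b.txt");
    Path[] local = DistributedCache.getLocalCacheFiles(conf);
    // local now holds both paths, in insertion order

    // timestamps come back parsed into long[]
    conf.set("mapred.cache.files.timestamps", "1284681600000,1284681601000");
    long[] stamps = DistributedCache.getFileTimestamps(conf);
  }
}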
+ 58 - 30
src/mapred/org/apache/hadoop/filecache/TaskDistributedCacheManager.java

@@ -30,19 +30,15 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobLocalizer;
 
 /**
  * Helper class of {@link TrackerDistributedCacheManager} that represents
- * the cached files of a single task.  This class is used
- * by TaskRunner/LocalJobRunner to parse out the job configuration
- * and setup the local caches.
+ * the cached files of a single job.
  * 
  * <b>This class is internal to Hadoop, and should not be treated as a public
  * interface.</b>
@@ -52,7 +48,7 @@ public class TaskDistributedCacheManager {
   private final Configuration taskConf;
   private final List<CacheFile> cacheFiles = new ArrayList<CacheFile>();
   private final List<String> classPaths = new ArrayList<String>();
- 
+  
   private boolean setupCalled = false;
 
   /**
@@ -94,7 +90,7 @@ public class TaskDistributedCacheManager {
      * files.
      */
     private static List<CacheFile> makeCacheFiles(URI[] uris, 
-        String[] timestamps, String cacheVisibilities[], Path[] paths, 
+        long[] timestamps, boolean cacheVisibilities[], Path[] paths, 
         FileType type) throws IOException {
       List<CacheFile> ret = new ArrayList<CacheFile>();
       if (uris != null) {
@@ -110,9 +106,8 @@ public class TaskDistributedCacheManager {
         for (int i = 0; i < uris.length; ++i) {
           URI u = uris[i];
           boolean isClassPath = (null != classPaths.get(u.getPath()));
-          long t = Long.parseLong(timestamps[i]);
-          ret.add(new CacheFile(u, type, Boolean.valueOf(cacheVisibilities[i]),
-              t, isClassPath));
+          ret.add(new CacheFile(u, type, cacheVisibilities[i],
+              timestamps[i], isClassPath));
         }
       }
       return ret;
@@ -148,36 +143,37 @@ public class TaskDistributedCacheManager {
   }
 
   /**
-   * Retrieve files into the local cache and updates the task configuration 
-   * (which has been passed in via the constructor).
+   * Retrieve public distributed cache files into the local cache and update
+   * the task configuration (which has been passed in via the constructor).
+   * For the private distributed cache, only the local paths where the
+   * files/archives should go are decided here; the actual localization is
+   * done by {@link JobLocalizer}.
    * 
    * It is the caller's responsibility to re-write the task configuration XML
    * file, if necessary.
    */
-  public void setup(LocalDirAllocator lDirAlloc, File workDir, 
-      String privateCacheSubdir, String publicCacheSubDir) throws IOException {
+  public void setupCache(String publicCacheSubdir, String privateCacheSubdir) 
+  throws IOException {
     setupCalled = true;
-    
-    if (cacheFiles.isEmpty()) {
-      return;
-    }
-
     ArrayList<Path> localArchives = new ArrayList<Path>();
     ArrayList<Path> localFiles = new ArrayList<Path>();
-    Path workdirPath = new Path(workDir.getAbsolutePath());
 
     for (CacheFile cacheFile : cacheFiles) {
       URI uri = cacheFile.uri;
       FileSystem fileSystem = FileSystem.get(uri, taskConf);
       FileStatus fileStatus = fileSystem.getFileStatus(new Path(uri.getPath()));
-      String cacheSubdir = publicCacheSubDir;
-      if (!cacheFile.isPublic) {
-        cacheSubdir = privateCacheSubdir;
+      Path p;
+      if (cacheFile.isPublic) {
+        p = distributedCacheManager.getLocalCache(uri, taskConf,
+            publicCacheSubdir, fileStatus, 
+            cacheFile.type == CacheFile.FileType.ARCHIVE,
+            cacheFile.timestamp, cacheFile.isPublic);
+      } else {
+        p = distributedCacheManager.getLocalCache(uri, taskConf,
+            privateCacheSubdir, fileStatus, 
+            cacheFile.type == CacheFile.FileType.ARCHIVE,
+            cacheFile.timestamp, cacheFile.isPublic);
       }
-      Path p = distributedCacheManager.getLocalCache(uri, taskConf,
-          cacheSubdir, fileStatus, 
-          cacheFile.type == CacheFile.FileType.ARCHIVE,
-          cacheFile.timestamp, workdirPath, false, cacheFile.isPublic);
       cacheFile.setLocalized(true);
 
       if (cacheFile.type == CacheFile.FileType.ARCHIVE) {
@@ -192,11 +188,11 @@ public class TaskDistributedCacheManager {
 
     // Update the configuration object with localized data.
     if (!localArchives.isEmpty()) {
-      DistributedCache.setLocalArchives(taskConf, 
+      DistributedCache.addLocalArchives(taskConf, 
         stringifyPathList(localArchives));
     }
     if (!localFiles.isEmpty()) {
-      DistributedCache.setLocalFiles(taskConf, stringifyPathList(localFiles));
+      DistributedCache.addLocalFiles(taskConf, stringifyPathList(localFiles));
     }
 
   }
@@ -232,6 +228,28 @@ public class TaskDistributedCacheManager {
     }
     return classPaths;
   }
+  
+  private List<String> formClasspath(Path[] paths, URI[] uris, 
+                                     Path[] localizedFiles) {
+    if (uris == null) {
+      return new ArrayList<String>();
+    }
+    Map<String, Path> clMap = new HashMap<String, Path>();
+    List<String> classPaths = new ArrayList<String>();
+    if (paths != null) {
+      for (Path p : paths) {
+        clMap.put(p.toUri().getPath().toString(), p);
+      }
+    }
+    for (int i = 0; i < uris.length; ++i) {
+      URI u = uris[i];
+      boolean isClassPath = (null != clMap.get(u.getPath()));
+      if (isClassPath) {
+        classPaths.add(localizedFiles[i].toString());
+      }
+    }
+    return classPaths;
+  }
 
   /**
    * Releases the cached files/archives, so that space
@@ -246,6 +264,16 @@ public class TaskDistributedCacheManager {
     }
   }
 
+  public void setSizes(long[] sizes) throws IOException {
+    int i = 0;
+    for (CacheFile c: cacheFiles) {
+      if (!c.isPublic) {
+        distributedCacheManager.setSize(c.uri, taskConf, c.timestamp, c.owner, 
+                                        sizes[i++]);
+      }
+    }
+  }
+
   /**
    * Creates a class loader that includes the designated
    * files and archives.
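
The key behavioral change above is that setupCache only downloads public cache objects; for private ones it merely decides the target path and defers the download to JobLocalizer, which runs as the job owner. A minimal sketch of that visibility split, where CacheEntry and both subdir names are hypothetical stand-ins for the real classes:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch of the visibility split in setupCache: public objects go under
    // the shared subdir, private ones under the per-user subdir; only the
    // target path differs. CacheEntry and the subdir names are hypothetical.
    public class VisibilitySplitDemo {
      static class CacheEntry {
        final String uri;
        final boolean isPublic;
        CacheEntry(String uri, boolean isPublic) {
          this.uri = uri;
          this.isPublic = isPublic;
        }
      }

      static List<String> assignTargets(List<CacheEntry> entries,
                                        String publicSubdir,
                                        String privateSubdir) {
        List<String> targets = new ArrayList<String>();
        for (CacheEntry e : entries) {
          String subdir = e.isPublic ? publicSubdir : privateSubdir;
          // Keep only the file-name portion of the URI for the local path.
          targets.add(subdir + "/" + e.uri.substring(e.uri.lastIndexOf('/') + 1));
        }
        return targets;
      }

      public static void main(String[] args) {
        List<CacheEntry> entries = new ArrayList<CacheEntry>();
        entries.add(new CacheEntry("hdfs://nn/apps/lib.jar", true));
        entries.add(new CacheEntry("hdfs://nn/user/alice/dict.txt", false));
        System.out.println(
            assignTargets(entries, "archive", "alice/distcache"));
      }
    }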

+ 169 - 122
src/mapred/org/apache/hadoop/filecache/TrackerDistributedCacheManager.java

@@ -21,6 +21,9 @@ import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
+import java.text.DateFormat;
+import java.util.Date;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
@@ -33,8 +36,6 @@ import java.util.TreeMap;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.TaskController;
-import org.apache.hadoop.mapred.TaskController.DistributedCacheFileContext;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
@@ -44,7 +45,10 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.mapred.InvalidJobConfException;
+import org.apache.hadoop.mapred.TaskController;
+import org.apache.hadoop.mapred.TaskTracker;
 import org.apache.hadoop.mapreduce.JobContext;
+import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.RunJar;
@@ -63,6 +67,11 @@ public class TrackerDistributedCacheManager {
   // cacheID to cacheStatus mapping
   private TreeMap<String, CacheStatus> cachedArchives = 
     new TreeMap<String, CacheStatus>();
+  private Map<JobID, TaskDistributedCacheManager> jobArchives =
+    new HashMap<JobID, TaskDistributedCacheManager>();
+  private final TaskController taskController;
+  private static final FsPermission PUBLIC_CACHE_OBJECT_PERM =
+    FsPermission.createImmutable((short) 0755);
 
   // For holding the properties of each cache directory
   static class CacheDir {
@@ -84,19 +93,17 @@ public class TrackerDistributedCacheManager {
   private final LocalFileSystem localFs;
   
   private LocalDirAllocator lDirAllocator;
-  
-  private TaskController taskController;
-  
+    
   private Configuration trackerConf;
   
   private Random random = new Random();
 
   public TrackerDistributedCacheManager(Configuration conf,
-      TaskController taskController) throws IOException {
+                                        TaskController controller
+                                        ) throws IOException {
     this.localFs = FileSystem.getLocal(conf);
     this.trackerConf = conf;
     this.lDirAllocator = new LocalDirAllocator("mapred.local.dir");
-    this.taskController = taskController;
 
     // setting the cache size to a default of 10GB
     this.allowedCacheSize = conf.getLong
@@ -105,6 +112,7 @@ public class TrackerDistributedCacheManager {
     this.allowedCacheSubdirs = conf.getLong
       ("mapreduce.tasktracker.local.cache.numberdirectories",
        DEFAULT_CACHE_SUBDIR_LIMIT);
+    this.taskController = controller;
   }
 
   /**
@@ -125,13 +133,6 @@ public class TrackerDistributedCacheManager {
    *  In case of a file, the path to the file is returned
    * @param confFileStamp this is the hdfs file modification timestamp to verify
    * that the file to be cached hasn't changed since the job started
-   * @param currentWorkDir this is the directory where you would want to create
-   * symlinks for the locally cached files/archives
-   * @param honorSymLinkConf if this is false, then the symlinks are not
-   * created even if conf says so (this is required for an optimization in task
-   * launches
-   * NOTE: This is effectively always on since r696957, since there is no code
-   * path that does not use this.
    * @param isPublic to know the cache file is accessible to public or private
    * @return the path to directory where the archives are unjarred in case of
    * archives, the path to the file where the file is copied locally
@@ -140,12 +141,14 @@ public class TrackerDistributedCacheManager {
   Path getLocalCache(URI cache, Configuration conf,
       String subDir, FileStatus fileStatus,
       boolean isArchive, long confFileStamp,
-      Path currentWorkDir, boolean honorSymLinkConf, boolean isPublic)
+      boolean isPublic)
       throws IOException {
     String key;
-    key = getKey(cache, conf, confFileStamp, getLocalizedCacheOwner(isPublic));
+    String user = getLocalizedCacheOwner(isPublic);
+    key = getKey(cache, conf, confFileStamp, user);
     CacheStatus lcacheStatus;
     Path localizedPath = null;
+    Path localPath = null;
     synchronized (cachedArchives) {
       lcacheStatus = cachedArchives.get(key);
       if (lcacheStatus == null) {
@@ -156,10 +159,12 @@ public class TrackerDistributedCacheManager {
              + "_" + (confFileStamp % Integer.MAX_VALUE));
         String cachePath = new Path (subDir, 
           new Path(uniqueString, makeRelative(cache, conf))).toString();
-        Path localPath = lDirAllocator.getLocalPathForWrite(cachePath,
+        localPath = lDirAllocator.getLocalPathForWrite(cachePath,
           fileStatus.getLen(), trackerConf);
-        lcacheStatus = new CacheStatus(new Path(localPath.toString().replace(
-          cachePath, "")), localPath, new Path(subDir), uniqueString);
+        lcacheStatus = 
+          new CacheStatus(new Path(localPath.toString().replace(cachePath, "")), 
+                          localPath, new Path(subDir), uniqueString, 
+                          isPublic ? null : user);
         cachedArchives.put(key, lcacheStatus);
       }
 
@@ -172,15 +177,20 @@ public class TrackerDistributedCacheManager {
       // do the localization, after releasing the global lock
       synchronized (lcacheStatus) {
         if (!lcacheStatus.isInited()) {
-          localizedPath = localizeCache(conf, cache, confFileStamp,
-              lcacheStatus, fileStatus, isArchive, isPublic);
+          if (isPublic) {
+            localizedPath = localizePublicCacheObject(conf, 
+                cache, 
+                confFileStamp,
+                lcacheStatus, fileStatus, 
+                isArchive);
+          } else {
+            localizedPath = localPath;
+          }
           lcacheStatus.initComplete();
         } else {
           localizedPath = checkCacheStatusValidity(conf, cache, confFileStamp,
-              lcacheStatus, fileStatus, isArchive);
+              lcacheStatus, fileStatus, isArchive);            
         }
-        createSymlink(conf, cache, lcacheStatus, isArchive,
-            currentWorkDir, honorSymLinkConf);
       }
 
       // try deleting stuff if you can
@@ -240,6 +250,21 @@ public class TrackerDistributedCacheManager {
     }
   }
 
+  void setSize(URI cache, Configuration conf, long timeStamp,
+               String owner, long size) throws IOException {
+    String key = getKey(cache, conf, timeStamp, owner);
+    synchronized (cachedArchives) {
+      CacheStatus lcacheStatus = cachedArchives.get(key);
+      if (lcacheStatus == null) {
+        LOG.warn("Cannot find localized cache: " + cache + 
+                 " (key: " + key + ") in setSize!");
+        return;
+      }
+      lcacheStatus.size = size;
+      addCacheInfoUpdate(lcacheStatus);
+    }
+  }
+
   /*
    * This method is called from unit tests. 
    */
@@ -284,7 +309,6 @@ public class TrackerDistributedCacheManager {
           it.hasNext();) {
         String cacheId = it.next();
         CacheStatus lcacheStatus = cachedArchives.get(cacheId);
-        
         // if reference count is zero 
         // mark the cache for deletion
         if (lcacheStatus.refcount == 0) {
@@ -299,22 +323,28 @@ public class TrackerDistributedCacheManager {
     // do the deletion, after releasing the global lock
     for (CacheStatus lcacheStatus : deleteList) {
       synchronized (lcacheStatus) {
-        FileSystem localFS = FileSystem.getLocal(conf);
-
         Path potentialDeletee = lcacheStatus.localizedLoadPath;
+        Path localizedDir = lcacheStatus.getLocalizedUniqueDir();
+        if (lcacheStatus.user == null) {
 
-        localFS.delete(potentialDeletee, true);
+          localFs.delete(potentialDeletee, true);
 
-        // Update the maps baseDirSize and baseDirNumberSubDir
-        LOG.info("Deleted path " + potentialDeletee);
+          // Update the maps baseDirSize and baseDirNumberSubDir
+          LOG.info("Deleted path " + potentialDeletee);
 
-        try {
-          localFS.delete(lcacheStatus.getLocalizedUniqueDir(), true);
-        } catch (IOException e) {
-          LOG.warn("Could not delete distributed cache empty directory "
-                   + lcacheStatus.getLocalizedUniqueDir());
+          try {
+            localFs.delete(localizedDir, true);
+          } catch (IOException e) {
+            LOG.warn("Could not delete distributed cache empty directory "
+                     + localizedDir);
+          }
+        } else {
+          int userDir = TaskTracker.getUserDir(lcacheStatus.user).length();
+          taskController.deleteAsUser(lcacheStatus.user,
+                                      potentialDeletee.toString().substring(userDir));
+          taskController.deleteAsUser(lcacheStatus.user,
+                                      localizedDir.toString().substring(userDir));          
         }
-
         deleteCacheInfoUpdate(lcacheStatus);
       }
     }
@@ -409,51 +439,54 @@ public class TrackerDistributedCacheManager {
     return false;
   }
 
-  private void createSymlink(Configuration conf, URI cache,
-      CacheStatus cacheStatus, boolean isArchive,
-      Path currentWorkDir, boolean honorSymLinkConf) throws IOException {
-    boolean doSymlink = honorSymLinkConf && DistributedCache.getSymlink(conf);
-    if(cache.getFragment() == null) {
-      doSymlink = false;
-    }
-
-    String link = 
-      currentWorkDir.toString() + Path.SEPARATOR + cache.getFragment();
-    File flink = new File(link);
-    if (doSymlink){
-      if (!flink.exists()) {
-        FileUtil.symLink(cacheStatus.localizedLoadPath.toString(), link);
-      }
-    }
-  }
-
-  //the method which actually copies the caches locally and unjars/unzips them
-  // and does chmod for the files
-  Path localizeCache(Configuration conf,
-                                      URI cache, long confFileStamp,
-                                      CacheStatus cacheStatus,
-                                      FileStatus fileStatus,
-                                      boolean isArchive, boolean isPublic)
-      throws IOException {
-    FileSystem fs = FileSystem.get(cache, conf);
+  /**
+   * Download a given path to the local file system.
+   * @param conf the job's configuration
+   * @param source the source to copy from
+   * @param destination where to copy the file; must be on the local file system
+   * @param desiredTimestamp the required modification timestamp of the source
+   * @param isArchive is this an archive that should be expanded
+   * @param permission the desired permissions of the file.
+   * @return for archives, the number of bytes in the unpacked directory
+   * @throws IOException
+   */
+  public static long downloadCacheObject(Configuration conf,
+                                         URI source,
+                                         Path destination,
+                                         long desiredTimestamp,
+                                         boolean isArchive,
+                                         FsPermission permission
+                                         ) throws IOException {
+    FileSystem sourceFs = FileSystem.get(source, conf);
     FileSystem localFs = FileSystem.getLocal(conf);
+    
+    Path sourcePath = new Path(source.getPath());
+    long modifiedTime = 
+      sourceFs.getFileStatus(sourcePath).getModificationTime();
+    if (modifiedTime != desiredTimestamp) {
+      DateFormat df = DateFormat.getDateTimeInstance(DateFormat.SHORT, 
+                                                     DateFormat.SHORT);
+      throw new IOException("The distributed cache object " + source + 
+                            " changed during the job from " + 
+                            df.format(new Date(desiredTimestamp)) + " to " +
+                            df.format(new Date(modifiedTime)));
+    }
+    
     Path parchive = null;
     if (isArchive) {
-      parchive = new Path(cacheStatus.localizedLoadPath,
-        new Path(cacheStatus.localizedLoadPath.getName()));
+      parchive = new Path(destination,
+        new Path(destination.getName()));
     } else {
-      parchive = cacheStatus.localizedLoadPath;
+      parchive = destination;
     }
-
-    if (!localFs.mkdirs(parchive.getParent())) {
+    if (!localFs.mkdirs(destination.getParent())) {
       throw new IOException("Mkdirs failed to create directory " +
-          cacheStatus.localizedLoadPath.toString());
+                            destination.getParent());
     }
 
-    String cacheId = cache.getPath();
-    fs.copyToLocalFile(new Path(cacheId), parchive);
+    sourceFs.copyToLocalFile(sourcePath, parchive);
     if (isArchive) {
-      String tmpArchive = parchive.toString().toLowerCase();
+      String tmpArchive = destination.toString().toLowerCase();
       File srcFile = new File(parchive.toString());
       File destDir = new File(parchive.getParent().toString());
       LOG.info(String.format("Extracting %s to %s",
@@ -472,46 +505,37 @@ public class TrackerDistributedCacheManager {
         // and copy the file into the dir as it is
       }
     }
-    long cacheSize =
+    // set proper permissions for the localized directory
+    localFs.setPermission(destination, permission);
+
+    LOG.info(String.format("Cached %s as %s",
+             source.toString(), destination.toString()));
+    long cacheSize = 
       FileUtil.getDU(new File(parchive.getParent().toString()));
-    cacheStatus.size = cacheSize;
+    return cacheSize;
+  }
+
+  // The method that actually copies the public cache object locally,
+  // unjars/unzips it, and sets permissions on the files.
+  Path localizePublicCacheObject(Configuration conf,
+                                 URI cache, long confFileStamp,
+                                 CacheStatus cacheStatus,
+                                 FileStatus fileStatus,
+                                 boolean isArchive) throws IOException {
+    long size = downloadCacheObject(conf, cache, cacheStatus.localizedLoadPath,
+                                    confFileStamp, isArchive, 
+                                    PUBLIC_CACHE_OBJECT_PERM);
+    cacheStatus.size = size;
     
     // Increase the size and sub directory count of the cache
     // from baseDirSize and baseDirNumberSubDir.
     addCacheInfoUpdate(cacheStatus);
 
-    // set proper permissions for the localized directory
-    setPermissions(conf, cacheStatus, isPublic);
-
-    // update cacheStatus to reflect the newly cached file
-    cacheStatus.mtime = DistributedCache.getTimestamp(conf, cache);
-
     LOG.info(String.format("Cached %s as %s",
              cache.toString(), cacheStatus.localizedLoadPath));
     return cacheStatus.localizedLoadPath;
   }
 
-  private void setPermissions(Configuration conf, CacheStatus cacheStatus,
-      boolean isPublic) throws IOException {
-    if (isPublic) {
-      Path localizedUniqueDir = cacheStatus.getLocalizedUniqueDir();
-      LOG.info("Doing chmod on localdir :" + localizedUniqueDir);
-      try {
-        FileUtil.chmod(localizedUniqueDir.toString(), "ugo+rx", true);
-      } catch (InterruptedException e) {
-        LOG.warn("Exception in chmod" + e.toString());
-        throw new IOException(e);
-      }
-    } else {
-      // invoke taskcontroller to set permissions
-      DistributedCacheFileContext context = new DistributedCacheFileContext(
-          conf.get("user.name"), new File(cacheStatus.localizedBaseDir
-              .toString()), cacheStatus.localizedBaseDir,
-          cacheStatus.uniqueString);
-      taskController.initializeDistributedCacheFile(context);
-    }
-  }
-
   private static boolean isTarFile(String filename) {
     return (filename.endsWith(".tgz") || filename.endsWith(".tar.gz") ||
            filename.endsWith(".tar"));
@@ -537,9 +561,6 @@ public class TrackerDistributedCacheManager {
       " has changed on HDFS since job started");
     }
 
-    if (dfsFileStamp != lcacheStatus.mtime) {
-      return false;
-    }
     return true;
   }
 
@@ -595,9 +616,6 @@ public class TrackerDistributedCacheManager {
     // number of instances using this cache
     int refcount;
 
-    // the cache-file modification time
-    long mtime;
-
     // is it initialized ?
     boolean inited = false;
 
@@ -607,17 +625,20 @@ public class TrackerDistributedCacheManager {
     
     // unique string used in the construction of local load path
     String uniqueString;
+    
+    // The user that owns the cache entry or null if it is public
+    final String user;
 
     public CacheStatus(Path baseDir, Path localLoadPath, Path subDir,
-        String uniqueString) {
+                       String uniqueString, String user) {
       super();
       this.localizedLoadPath = localLoadPath;
       this.refcount = 0;
-      this.mtime = -1;
       this.localizedBaseDir = baseDir;
       this.size = 0;
       this.subDir = subDir;
       this.uniqueString = uniqueString;
+      this.user = user;
     }
     
     Path getBaseDir(){
@@ -657,11 +678,29 @@ public class TrackerDistributedCacheManager {
     }
   }
 
-  public TaskDistributedCacheManager newTaskDistributedCacheManager(
-      Configuration taskConf) throws IOException {
-    return new TaskDistributedCacheManager(this, taskConf);
+  public TaskDistributedCacheManager 
+  newTaskDistributedCacheManager(JobID jobId,
+                                 Configuration taskConf) throws IOException {
+    TaskDistributedCacheManager result = 
+      new TaskDistributedCacheManager(this, taskConf);
+    jobArchives.put(jobId, result);
+    return result;
+  }
+
+  public void releaseJob(JobID jobId) throws IOException {
+    TaskDistributedCacheManager mgr = jobArchives.get(jobId);
+    if (mgr != null) {
+      mgr.release();
+      jobArchives.remove(jobId);
+    }
   }
 
+  public void setArchiveSizes(JobID jobId, long[] sizes) throws IOException {
+    TaskDistributedCacheManager mgr = jobArchives.get(jobId);
+    if (mgr != null) {
+      mgr.setSizes(sizes);
+    }
+  }
 
   /**
    * Determines timestamps of files to be cached, and stores those
@@ -744,25 +783,37 @@ public class TrackerDistributedCacheManager {
     }
   }
   
+  private static boolean[] parseBooleans(String[] strs) {
+    if (null == strs) {
+      return null;
+    }
+    boolean[] result = new boolean[strs.length];
+    for(int i=0; i < strs.length; ++i) {
+      result[i] = Boolean.parseBoolean(strs[i]);
+    }
+    return result;
+  }
+
   /**
    * Get the booleans on whether the files are public or not.  Used by 
    * internal DistributedCache and MapReduce code.
   * @param conf The configuration which stored the visibilities
-   * @return a string array of booleans 
+   * @return array of booleans 
    * @throws IOException
    */
-  static String[] getFileVisibilities(Configuration conf) {
-    return conf.getStrings(JobContext.CACHE_FILE_VISIBILITIES);
+  public static boolean[] getFileVisibilities(Configuration conf) {
+    return parseBooleans(conf.getStrings(JobContext.CACHE_FILE_VISIBILITIES));
   }
 
   /**
    * Get the booleans on whether the archives are public or not.  Used by 
    * internal DistributedCache and MapReduce code.
   * @param conf The configuration which stored the visibilities
-   * @return a string array of booleans 
+   * @return array of booleans 
    */
-  static String[] getArchiveVisibilities(Configuration conf) {
-    return conf.getStrings(JobContext.CACHE_ARCHIVES_VISIBILITIES);
+  public static boolean[] getArchiveVisibilities(Configuration conf) {
+    return parseBooleans(conf.getStrings(JobContext.
+                                           CACHE_ARCHIVES_VISIBILITIES));
   }
 
   /**
@@ -837,8 +888,6 @@ public class TrackerDistributedCacheManager {
 
     Path thisSubject = null;
 
-    String thisCategory = DistributedCache.CACHE_ARCHIVES;
-
     if (archiveStrings != null && fileStrings != null) {
       final Set<Path> archivesSet = new HashSet<Path>();
 
@@ -846,8 +895,6 @@ public class TrackerDistributedCacheManager {
         archivesSet.add(coreLocation(archiveString, conf));
       }
 
-      thisCategory = DistributedCache.CACHE_FILES;
-
       for (String fileString : fileStrings) {
         thisSubject = coreLocation(fileString, conf);
 

+ 43 - 6
src/mapred/org/apache/hadoop/mapred/Child.java

@@ -24,19 +24,27 @@ import java.io.IOException;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
+import java.util.Arrays;
+import java.util.List;
+import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSError;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.server.tasktracker.JVMInfo;
+import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
 import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetricsSource;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -55,6 +63,7 @@ class Child {
 
   static volatile TaskAttemptID taskid = null;
   static volatile boolean isCleanup;
+  static String cwd;
 
   public static void main(String[] args) throws Throwable {
     LOG.debug("Child starting");
@@ -69,6 +78,12 @@ class Child {
     int jvmIdInt = Integer.parseInt(args[4]);
     JVMId jvmId = new JVMId(firstTaskid.getJobID(),firstTaskid.isMap(),jvmIdInt);
     String prefix = firstTaskid.isMap() ? "MapTask" : "ReduceTask";
+    
+    cwd = System.getenv().get(TaskRunner.HADOOP_WORK_DIR);
+    if (cwd == null) {
+      throw new IOException("Environment variable " + 
+                             TaskRunner.HADOOP_WORK_DIR + " is not set");
+    }
 
     // file name is passed thru env
     String jobTokenFile = 
@@ -171,10 +186,6 @@ class Child {
         isCleanup = task.isTaskCleanupTask();
         // reset the statistics for the task
         FileSystem.clearStatistics();
-
-        //create the index file so that the log files 
-        //are viewable immediately
-        TaskLog.syncLogs(logLocation, taskid, isCleanup);
         
         // Create the job-conf and set credentials
         final JobConf job = new JobConf(task.getJobFile());
@@ -187,12 +198,19 @@ class Child {
         // setup the child's mapred-local-dir. The child is now sandboxed and
        // can only see files at and under the attempt dir.
         TaskRunner.setupChildMapredLocalDirs(task, job);
+        
+        // setup the child's attempt directories
+        localizeTask(task, job, logLocation);
 
         //setupWorkDir actually sets up the symlinks for the distributed
         //cache. After a task exits we wipe the workdir clean, and hence
         //the symlinks have to be rebuilt.
-        TaskRunner.setupWorkDir(job, new File(".").getAbsoluteFile());
-
+        TaskRunner.setupWorkDir(job, new File(cwd));
+        
+        //create the index file so that the log files 
+        //are viewable immediately
+        TaskLog.syncLogs(logLocation, taskid, isCleanup);
+        
         numTasksToExecute = job.getNumTasksToExecutePerJvm();
         assert(numTasksToExecute != 0);
 
@@ -219,6 +237,10 @@ class Child {
               taskFinal.run(job, umbilical);             // run the task
             } finally {
               TaskLog.syncLogs(logLocation, taskid, isCleanup);
+              TaskLogsTruncater trunc = new TaskLogsTruncater(defaultConf);
+              trunc.truncateLogs(new JVMInfo(
+                  TaskLog.getAttemptDir(taskFinal.getTaskID(),
+                    taskFinal.isTaskCleanupTask()), Arrays.asList(taskFinal)));
             }
 
             return null;
@@ -288,4 +310,19 @@ class Child {
     DefaultMetricsSystem.INSTANCE.shutdown();
   }
 
+  static void localizeTask(Task task, JobConf jobConf, String logLocation) 
+  throws IOException {
+    
+    // Do the task-type specific localization
+    task.localizeConfiguration(jobConf);
+    
+    //write the localized task jobconf
+    LocalDirAllocator lDirAlloc = 
+      new LocalDirAllocator(JobConf.MAPRED_LOCAL_DIR_PROPERTY);
+    Path localTaskFile =
+      lDirAlloc.getLocalPathForWrite(TaskTracker.JOBFILE, jobConf);
+    JobLocalizer.writeLocalJobFile(localTaskFile, jobConf);
+    task.setJobFile(localTaskFile.toString());
+    task.setConf(jobConf);
+  }
 }
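
Child now reads its working directory from the environment and fails fast when the task controller did not set it, instead of assuming ".". A sketch of that fail-fast check, using the same HADOOP_WORK_DIR variable name as the diff (everything else is a standalone stand-in):

    import java.io.IOException;

    // Sketch of the fail-fast environment check added to Child.main: the
    // working directory is handed to the child via an environment variable,
    // and the child refuses to start without it.
    public class EnvCheckDemo {
      static String requireEnv(String name) throws IOException {
        String value = System.getenv(name);
        if (value == null) {
          throw new IOException("Environment variable " + name + " is not set");
        }
        return value;
      }

      public static void main(String[] args) throws IOException {
        String cwd = requireEnv("HADOOP_WORK_DIR");
        System.out.println("Child working directory: " + cwd);
      }
    }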

+ 11 - 27
src/mapred/org/apache/hadoop/mapred/CleanupQueue.java

@@ -24,7 +24,7 @@ import java.util.concurrent.LinkedBlockingQueue;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
-import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 
 class CleanupQueue {
@@ -54,24 +54,23 @@ class CleanupQueue {
    * Contains info related to the path of the file/dir to be deleted
    */
   static class PathDeletionContext {
-    String fullPath;// full path of file or dir
-    FileSystem fs;
+    Path fullPath;// full path of file or dir
+    Configuration conf;
 
-    public PathDeletionContext(FileSystem fs, String fullPath) {
-      this.fs = fs;
+    public PathDeletionContext(Path fullPath, Configuration conf) {
       this.fullPath = fullPath;
+      this.conf = conf;
     }
     
-    protected String getPathForCleanup() {
+    protected Path getPathForCleanup() {
       return fullPath;
     }
 
     /**
-     * Makes the path(and its subdirectories recursively) fully deletable
+     * Deletes the path (and its subdirectories recursively)
      */
-    protected void enablePathForCleanup() throws IOException {
-      // Do nothing by default.
-      // Subclasses can override to provide enabling for deletion.
+    protected void deletePath() throws IOException {
+      fullPath.getFileSystem(conf).delete(fullPath, true);
     }
   }
 
@@ -82,19 +81,6 @@ class CleanupQueue {
     cleanupThread.addToQueue(contexts);
   }
 
-  protected static boolean deletePath(PathDeletionContext context)
-            throws IOException {
-    context.enablePathForCleanup();
-
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Trying to delete " + context.fullPath);
-    }
-    if (context.fs.exists(new Path(context.fullPath))) {
-      return context.fs.delete(new Path(context.fullPath), true);
-    }
-    return true;
-  }
-
   // currently used by tests only
   protected boolean isQueueEmpty() {
     return (cleanupThread.queue.size() == 0);
@@ -128,11 +114,9 @@ class CleanupQueue {
       while (true) {
         try {
           context = queue.take();
+          context.deletePath();
           // delete the path.
-          if (!deletePath(context)) {
-            LOG.warn("CleanupThread:Unable to delete path " + context.fullPath);
-          }
-          else if (LOG.isDebugEnabled()) {
+          if (LOG.isDebugEnabled()) {
             LOG.debug("DELETED " + context.fullPath);
           }
         } catch (InterruptedException t) {
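
After this change a PathDeletionContext carries everything needed to delete its own path, and the cleanup thread simply drains a blocking queue. A self-contained sketch of that pattern, deleting java.io.File paths instead of Hadoop Paths:

    import java.io.File;
    import java.util.concurrent.LinkedBlockingQueue;

    // Sketch of the reworked CleanupQueue: each context knows how to delete
    // its own path, and a daemon thread drains a blocking queue.
    public class CleanupQueueDemo {
      static class PathDeletionContext {
        final File fullPath;
        PathDeletionContext(File fullPath) {
          this.fullPath = fullPath;
        }
        // Subclasses may override, e.g. to delete as a particular user.
        void deletePath() {
          fullPath.delete();
        }
      }

      static final LinkedBlockingQueue<PathDeletionContext> queue =
          new LinkedBlockingQueue<PathDeletionContext>();

      public static void main(String[] args) throws Exception {
        Thread cleaner = new Thread() {
          public void run() {
            try {
              while (true) {
                queue.take().deletePath();  // blocks until work arrives
              }
            } catch (InterruptedException ie) {
              // shutting down
            }
          }
        };
        cleaner.setDaemon(true);
        cleaner.start();

        File tmp = File.createTempFile("cleanup", ".tmp");
        queue.put(new PathDeletionContext(tmp));
        Thread.sleep(200);
        System.out.println("deleted: " + !tmp.exists());
      }
    }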

+ 166 - 122
src/mapred/org/apache/hadoop/mapred/DefaultTaskController.java

@@ -18,20 +18,23 @@
 
 package org.apache.hadoop.mapred;
 
+import java.io.File;
 import java.io.IOException;
 import java.util.List;
 
-
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext;
-import org.apache.hadoop.mapred.JvmManager.JvmEnv;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
+import org.apache.hadoop.util.ProcessTree.Signal;
 import org.apache.hadoop.util.ProcessTree;
-import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
 
 /**
  * The default implementation for controlling tasks.
@@ -48,147 +51,188 @@ public class DefaultTaskController extends TaskController {
 
   private static final Log LOG = 
       LogFactory.getLog(DefaultTaskController.class);
-  /**
-   * Launch a new JVM for the task.
-   * 
-   * This method launches the new JVM for the task by executing the
-   * the JVM command using the {@link Shell.ShellCommandExecutor}
-   */
-  void launchTaskJVM(TaskController.TaskControllerContext context) 
-                                      throws IOException {
-    initializeTask(context);
-
-    JvmEnv env = context.env;
-    List<String> wrappedCommand = 
-      TaskLog.captureOutAndError(env.setup, env.vargs, env.stdout, env.stderr,
-          env.logSize, true);
-    ShellCommandExecutor shexec = 
-        new ShellCommandExecutor(wrappedCommand.toArray(new String[0]), 
-                                  env.workDir, env.env);
-    // set the ShellCommandExecutor for later use.
-    context.shExec = shexec;
-    shexec.execute();
-  }
-    
-  /**
-   * Initialize the task environment.
-   * 
-   * Since tasks are launched as the tasktracker user itself, this
-   * method has no action to perform.
-   */
-  void initializeTask(TaskController.TaskControllerContext context) {
-    // The default task controller does not need to set up
-    // any permissions for proper execution.
-    // So this is a dummy method.
-    return;
-  }
-
-  /*
-   * No need to do anything as we don't need to do as we dont need anything
-   * extra from what TaskTracker has done.
-   */
+  private FileSystem fs;
   @Override
-  void initializeJob(JobInitializationContext context) {
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
+    try {
+      fs = FileSystem.getLocal(conf).getRaw();
+    } catch (IOException ie) {
+      throw new RuntimeException("Failed getting LocalFileSystem", ie);
+    }
   }
 
+  /**
+   * Creates all of the directories for the task and launches the child JVM.
+   * @param user the user name
+   * @param jobId the job id
+   * @param attemptId the attempt id
+   * @throws IOException
+   */
   @Override
-  void terminateTask(TaskControllerContext context) {
-    ShellCommandExecutor shexec = context.shExec;
-    if (shexec != null) {
-      Process process = shexec.getProcess();
-      if (Shell.WINDOWS) {
-        // Currently we don't use setsid on WINDOWS. 
-        //So kill the process alone.
-        if (process != null) {
-          process.destroy();
-        }
-      }
-      else { // In addition to the task JVM, kill its subprocesses also.
-        String pid = context.pid;
-        if (pid != null) {
-          if(ProcessTree.isSetsidAvailable) {
-            ProcessTree.terminateProcessGroup(pid);
-          }else {
-            ProcessTree.terminateProcess(pid);
-          }
-        }
+  public int launchTask(String user, 
+                                  String jobId,
+                                  String attemptId,
+                                  List<String> setup,
+                                  List<String> jvmArguments,
+                                  File currentWorkDirectory,
+                                  String stdout,
+                                  String stderr) throws IOException {
+    
+    ShellCommandExecutor shExec = null;
+    try {
+      FileSystem localFs = FileSystem.getLocal(getConf());
+      
+      //create the attempt dirs
+      new Localizer(localFs, 
+          getConf().getStrings(JobConf.MAPRED_LOCAL_DIR_PROPERTY)).
+          initializeAttemptDirs(user, jobId, attemptId);
+      
+      // create the working-directory of the task 
+      if (!currentWorkDirectory.mkdir()) {
+        throw new IOException("Mkdirs failed to create " 
+                    + currentWorkDirectory.toString());
       }
-    }
-  }
-  
-  @Override
-  void killTask(TaskControllerContext context) {
-    ShellCommandExecutor shexec = context.shExec;
-    if (shexec != null) {
-      if (Shell.WINDOWS) {
-        //We don't do send kill process signal in case of windows as 
-        //already we have done a process.destroy() in termintateTaskJVM()
-        return;
+      
+      // mkdir the log location
+      String logLocation = TaskLog.getAttemptDir(jobId, attemptId).toString();
+      if (!localFs.mkdirs(new Path(logLocation))) {
+        throw new IOException("Mkdirs failed to create " 
+                   + logLocation);
       }
-      String pid = context.pid;
-      if (pid != null) {
-        if(ProcessTree.isSetsidAvailable) {
-          ProcessTree.killProcessGroup(pid);
-        }else {
-          ProcessTree.killProcess(pid);
-        }
+      // get the raw local file system and the log size
+      FileSystem rawFs = FileSystem.getLocal(getConf()).getRaw();
+      long logSize = 0; //TODO: Ref BUG:2854624
+      // get the JVM command line.
+      String cmdLine = 
+        TaskLog.buildCommandLine(setup, jvmArguments,
+            new File(stdout), new File(stderr), logSize, true);
+
+      // write the command to a file in the
+      // task specific cache directory
+      // TODO copy to user dir
+      Path p = new Path(allocator.getLocalPathForWrite(
+          TaskTracker.getPrivateDirTaskScriptLocation(user, jobId, attemptId),
+          getConf()), COMMAND_FILE);
+
+      String commandFile = writeCommand(cmdLine, rawFs, p);
+      rawFs.setPermission(p, TaskController.TASK_LAUNCH_SCRIPT_PERMISSION);
+      shExec = new ShellCommandExecutor(new String[]{
+          "bash", "-c", commandFile},
+          currentWorkDirectory);
+      shExec.execute();
+    } catch (Exception e) {
+      if (shExec == null) {
+        return -1;
       }
+      int exitCode = shExec.getExitCode();
+      LOG.warn("Exit code from task is : " + exitCode);
+      LOG.info("Output from DefaultTaskController's launchTask follows:");
+      logOutput(shExec.getOutput());
+      return exitCode;
     }
+    return 0;
   }
-
+    
   /**
-   * Enables the task for cleanup by changing permissions of the specified path
-   * in the local filesystem
+   * This routine initializes the local file system for running a job.
+   * Details:
+   * <ul>
+   * <li>Copies the credentials file from the TaskTracker's private space to
+   * the job's private space </li>
+   * <li>Creates the job work directory and sets
+   * {@link TaskTracker#JOB_LOCAL_DIR} in the configuration</li>
+   * <li>Downloads the job.jar, unjars it, and updates the configuration to 
+   * reflect the localized path of the job.jar</li>
+   * <li>Creates a base JobConf in the job's private space</li>
+   * <li>Sets up the distributed cache</li>
+   * <li>Sets up the user logs directory for the job</li>
+   * </ul>
+   * This method must be invoked in the access control context of the job owner
+   * user. This is because the distributed cache is also set up here, and
+   * access to the HDFS files requires authentication tokens when
+   * security is enabled.
+   * @param user the user in question (the job owner)
+   * @param jobid the ID of the job in question
+   * @param credentials the path to the credentials file that the TaskTracker
+   * downloaded
+   * @param jobConf the path to the job configuration file that the TaskTracker
+   * downloaded
+   * @param taskTracker the connection to the task tracker
+   * @throws IOException
    */
   @Override
-  void enableTaskForCleanup(PathDeletionContext context)
-         throws IOException {
-    enablePathForCleanup(context);
+  public void initializeJob(String user, String jobid, 
+                            Path credentials, Path jobConf, 
+                            TaskUmbilicalProtocol taskTracker
+                            ) throws IOException {
+    final LocalDirAllocator lDirAlloc = allocator;
+    FileSystem localFs = FileSystem.getLocal(getConf());
+    JobLocalizer localizer = new JobLocalizer((JobConf)getConf(), user, jobid);
+    localizer.createLocalDirs();
+    localizer.createUserDirs();
+    localizer.createJobDirs();
+
+    JobConf jConf = new JobConf(jobConf);
+    localizer.createWorkDir(jConf);
+    //copy the credential file
+    Path localJobTokenFile = lDirAlloc.getLocalPathForWrite(
+        TaskTracker.getLocalJobTokenFile(user, jobid), getConf());
+    FileUtil.copy(
+        localFs, credentials, localFs, localJobTokenFile, false, getConf());
+
+
+    //setup the user logs dir
+    localizer.initializeJobLogDir();
+
+    // Download the job.jar for this job from the system FS
+    // setup the distributed cache
+    // write job acls
+    // write localized config
+    localizer.localizeJobFiles(JobID.forName(jobid), jConf, localJobTokenFile, 
+                               taskTracker);
   }
-  
+
+  @Override
+  public void signalTask(String user, int taskPid, Signal signal) {
+    if (ProcessTree.isSetsidAvailable) {
+      ProcessTree.killProcessGroup(Integer.toString(taskPid), signal);
+    } else {
+      ProcessTree.killProcess(Integer.toString(taskPid), signal);      
+    }
+  }
+
   /**
-   * Enables the job for cleanup by changing permissions of the specified path
-   * in the local filesystem
+   * Delete the user's files under all of the task tracker root directories.
+   * @param user the user name
+   * @param subDir the path relative to the user's subdirectory under
+   *        the task tracker root directories.
+   * @throws IOException
    */
   @Override
-  void enableJobForCleanup(PathDeletionContext context)
-         throws IOException {
-    enablePathForCleanup(context);
+  public void deleteAsUser(String user, 
+                           String subDir) throws IOException {
+    String dir = TaskTracker.getUserDir(user) + Path.SEPARATOR + subDir;
+    for(Path fullDir: allocator.getAllLocalPathsToRead(dir, getConf())) {
+      fs.delete(fullDir, true);
+    }
   }
   
   /**
-   * Enables the path for cleanup by changing permissions of the specified path
-   * in the local filesystem
+   * Delete the user's files under the userlogs directory.
+   * @param user the user to work as
+   * @param subDir the path under the userlogs directory.
+   * @throws IOException
    */
-  private void enablePathForCleanup(PathDeletionContext context)
-         throws IOException {
-    try {
-      FileUtil.chmod(context.fullPath, "u+rwx", true);
-    } catch(InterruptedException e) {
-      LOG.warn("Interrupted while setting permissions for " + context.fullPath +
-          " for deletion.");
-    } catch(IOException ioe) {
-      LOG.warn("Unable to change permissions of " + context.fullPath);
-    }
-  }
-
   @Override
-  public void initializeDistributedCacheFile(DistributedCacheFileContext context)
-      throws IOException {
-    Path localizedUniqueDir = context.getLocalizedUniqueDir();
-    try {
-      // Setting recursive execute permission on localized dir
-      LOG.info("Doing chmod on localdir :" + localizedUniqueDir);
-      FileUtil.chmod(localizedUniqueDir.toString(), "+x", true);
-    } catch (InterruptedException ie) {
-      LOG.warn("Exception in doing chmod on" + localizedUniqueDir, ie);
-      throw new IOException(ie);
-    }
+  public void deleteLogAsUser(String user, 
+                              String subDir) throws IOException {
+    Path dir = new Path(TaskLog.getUserLogDir().getAbsolutePath(), subDir);
+    fs.delete(dir, true);
   }
 
   @Override
-  public void initializeUser(InitializationContext context) {
-    // Do nothing.
+  public void setup(LocalDirAllocator allocator) {
+    this.allocator = allocator;
   }
   
 }
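
launchTask now writes the JVM command line to a script file under the task's private directory, tightens its permissions, and executes it via "bash -c". A rough sketch of that write-then-exec pattern, with plain ProcessBuilder standing in for the Hadoop shell utilities and illustrative script contents:

    import java.io.File;
    import java.io.FileWriter;
    import java.io.IOException;

    // Sketch of the write-then-exec launch pattern in launchTask: write the
    // command line to a script, restrict it to the owner, run it via bash -c.
    public class LaunchScriptDemo {
      public static void main(String[] args)
          throws IOException, InterruptedException {
        File script = File.createTempFile("taskjvm", ".sh");
        script.deleteOnExit();
        FileWriter out = new FileWriter(script);
        out.write("echo task running in $PWD\n");
        out.close();
        // Owner-only execute, in the spirit of TASK_LAUNCH_SCRIPT_PERMISSION.
        script.setExecutable(true, true);

        Process p = new ProcessBuilder("bash", "-c", script.getAbsolutePath())
            .inheritIO()
            .start();
        System.out.println("exit code: " + p.waitFor());
      }
    }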

+ 17 - 1
src/mapred/org/apache/hadoop/mapred/IsolationRunner.java

@@ -28,6 +28,7 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
@@ -116,6 +117,13 @@ public class IsolationRunner {
         SortedRanges.Range range) throws IOException {
       LOG.info("Task " + taskid + " reportedNextRecordRange " + range);
     }
+
+    @Override
+    public void 
+    updatePrivateDistributedCacheSizes(org.apache.hadoop.mapreduce.JobID jobId,
+                                       long[] sizes){
+      // NOTHING
+    }
   }
   
   private ClassLoader makeClassLoader(JobConf conf, 
@@ -176,9 +184,17 @@ public class IsolationRunner {
     // setup the local and user working directories
     FileSystem local = FileSystem.getLocal(conf);
     LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
+    Path workDirName;
+    boolean workDirExists = lDirAlloc.ifExists(MRConstants.WORKDIR, conf);
+    if (workDirExists) {
+      workDirName = TaskRunner.formWorkDir(lDirAlloc, conf);
+    } else {
+      workDirName = lDirAlloc.getLocalPathForWrite(MRConstants.WORKDIR, 
+                                                       conf);
+    }
 
-    File workDirName = TaskRunner.formWorkDir(lDirAlloc, taskId, false, conf);
     local.setWorkingDirectory(new Path(workDirName.toString()));
+    
     FileSystem.get(conf).setWorkingDirectory(conf.getWorkingDirectory());
     
     // set up a classloader with the right classpath
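
IsolationRunner now reuses an existing work directory when one of the local dirs already has it, and only allocates a fresh path otherwise. A sketch of that reuse-or-allocate lookup across multiple local roots, where resolveWorkDir is a hypothetical stand-in for the LocalDirAllocator.ifExists/getLocalPathForWrite pair in the diff:

    import java.io.File;
    import java.io.IOException;

    // Sketch of the reuse-or-allocate lookup added to IsolationRunner: reuse
    // an existing work dir found under any local root, otherwise create a
    // fresh one under the first root.
    public class WorkDirDemo {
      static File resolveWorkDir(File[] roots, String name) throws IOException {
        for (File root : roots) {
          File candidate = new File(root, name);
          if (candidate.isDirectory()) {
            return candidate;                   // like ifExists(...) == true
          }
        }
        File fresh = new File(roots[0], name);  // like getLocalPathForWrite(...)
        if (!fresh.mkdirs()) {
          throw new IOException("Mkdirs failed to create " + fresh);
        }
        return fresh;
      }

      public static void main(String[] args) throws IOException {
        File tmp = new File(System.getProperty("java.io.tmpdir"));
        System.out.println(resolveWorkDir(new File[] { tmp }, "isolation-work"));
      }
    }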

+ 1 - 2
src/mapred/org/apache/hadoop/mapred/JobInProgress.java

@@ -3140,8 +3140,7 @@ public class JobInProgress {
         }
 
         Path tempDir = jobtracker.getSystemDirectoryForJob(getJobID());
-        new CleanupQueue().addToQueue(new PathDeletionContext(
-            jobtracker.getFileSystem(), tempDir.toUri().getPath())); 
+        new CleanupQueue().addToQueue(new PathDeletionContext(tempDir, conf)); 
       } catch (IOException e) {
         LOG.warn("Error cleaning up "+profile.getJobID()+": "+e);
       }

+ 531 - 0
src/mapred/org/apache/hadoop/mapred/JobLocalizer.java

@@ -0,0 +1,531 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.URI;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.security.PrivilegedExceptionAction;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.filecache.DistributedCache;
+import org.apache.hadoop.filecache.TaskDistributedCacheManager;
+import org.apache.hadoop.filecache.TrackerDistributedCacheManager;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.mapred.QueueManager.QueueACL;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.security.TokenCache;
+import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.RunJar;
+
+/**
+ * Internal class responsible for initializing the job, not intended for users.
+ * Creates the following hierarchy:
+ * <ul>
+ *   <li>$mapred.local.dir/taskTracker/$user</li>
+ *   <li>$mapred.local.dir/taskTracker/$user/jobcache</li>
+ *   <li>$mapred.local.dir/taskTracker/$user/jobcache/$jobid/work</li>
+ *   <li>$mapred.local.dir/taskTracker/$user/jobcache/$jobid/jars</li>
+ *   <li>$mapred.local.dir/taskTracker/$user/jobcache/$jobid/jars/job.jar</li>
+ *   <li>$mapred.local.dir/taskTracker/$user/jobcache/$jobid/job.xml</li>
+ *   <li>$mapred.local.dir/taskTracker/$user/jobcache/$jobid/jobToken</li>
+ *   <li>$mapred.local.dir/taskTracker/$user/distcache</li>
+ * </ul>
+ */
+public class JobLocalizer {
+
+  static final Log LOG = LogFactory.getLog(JobLocalizer.class);
+
+  private static final FsPermission urwx =
+    FsPermission.createImmutable((short) 0700);
+  private static final FsPermission urwx_gx =
+    FsPermission.createImmutable((short) 0710);
+  private static final FsPermission urw_gr =
+    FsPermission.createImmutable((short) 0640);
+
+  private final String user;
+  private final String jobid;
+  private final FileSystem lfs;
+  private final List<Path> localDirs;
+  private final LocalDirAllocator lDirAlloc;
+  private final JobConf ttConf;
+
+  private final String JOBDIR;
+  private final String DISTDIR;
+  private final String WORKDIR;
+  private final String JARDST;
+  private final String JOBCONF;
+  private final String JOBTOKEN;
+  private static final String JOB_LOCAL_CTXT = "mapred.job.local.dir";
+
+  public JobLocalizer(JobConf ttConf, String user, String jobid)
+      throws IOException {
+    this(ttConf, user, jobid,
+        ttConf.getStrings(JobConf.MAPRED_LOCAL_DIR_PROPERTY));
+  }
+
+  public JobLocalizer(JobConf ttConf, String user, String jobid,
+      String... localDirs) throws IOException {
+    if (null == user) {
+      throw new IOException("Cannot initialize for null user");
+    }
+    this.user = user;
+    if (null == jobid) {
+      throw new IOException("Cannot initialize for null jobid");
+    }
+    this.jobid = jobid;
+    this.ttConf = ttConf;
+    lfs = FileSystem.getLocal(ttConf).getRaw();
+    this.localDirs = createPaths(user, localDirs);
+    ttConf.setStrings(JOB_LOCAL_CTXT, localDirs);
+    Collections.shuffle(this.localDirs);
+    lDirAlloc = new LocalDirAllocator(JOB_LOCAL_CTXT);
+    JOBDIR = TaskTracker.JOBCACHE + Path.SEPARATOR + jobid;
+    DISTDIR = JOBDIR + "/" + TaskTracker.DISTCACHEDIR;
+    WORKDIR = JOBDIR + "/work";
+    JARDST = JOBDIR + "/" + TaskTracker.JARSDIR + "/job.jar";
+    JOBCONF = JOBDIR + "/" + TaskTracker.JOBFILE;
+    JOBTOKEN = JOBDIR + "/" + TaskTracker.JOB_TOKEN_FILE;
+  }
+
+  private static List<Path> createPaths(String user, final String[] str)
+      throws IOException {
+    if (null == str || 0 == str.length) {
+      throw new IOException("mapred.local.dir contains no entries");
+    }
+    final List<Path> ret = new ArrayList<Path>(str.length);
+    for (int i = 0; i < str.length; ++i) {
+      final Path p = new Path(str[i], TaskTracker.getUserDir(user));
+      ret.add(p);
+      str[i] = p.toString();
+    }
+    return ret;
+  }
+
+  public void createLocalDirs() throws IOException {
+    boolean userDirStatus = false;
+    // create all directories as rwx------
+    for (Path localDir : localDirs) {
+      // create $mapred.local.dir/taskTracker/$user
+      if (!lfs.mkdirs(localDir, urwx)) {
+        LOG.warn("Unable to create the user directory : " + localDir);
+        continue;
+      }
+      userDirStatus = true;
+    }
+    if (!userDirStatus) {
+      throw new IOException("Not able to initialize user directories "
+          + "in any of the configured local directories for user " + user);
+    }
+  }
+
+  /**
+   * Initialize the local directories for a particular user on this TT. This
+   * involves creating and setting permissions on the following directories
+   * <ul>
+   * <li>$mapred.local.dir/taskTracker/$user</li>
+   * <li>$mapred.local.dir/taskTracker/$user/jobcache</li>
+   * <li>$mapred.local.dir/taskTracker/$user/distcache</li>
+   * </ul>
+   */
+  public void createUserDirs() throws IOException {
+    LOG.info("Initializing user " + user + " on this TT.");
+
+    boolean jobCacheDirStatus = false;
+    boolean distributedCacheDirStatus = false;
+
+    // create all directories as rwx------
+    for (Path localDir : localDirs) {
+      // create $mapred.local.dir/taskTracker/$user/jobcache
+      final Path jobDir =
+        new Path(localDir, TaskTracker.JOBCACHE);
+      if (!lfs.mkdirs(jobDir, urwx)) {
+        LOG.warn("Unable to create job cache directory : " + jobDir);
+      } else {
+        jobCacheDirStatus = true;
+      }
+      // create $mapred.local.dir/taskTracker/$user/distcache
+      final Path distDir =
+        new Path(localDir, TaskTracker.DISTCACHEDIR);
+      if (!lfs.mkdirs(distDir, urwx)) {
+        LOG.warn("Unable to create distributed-cache directory : " + distDir);
+      } else {
+        distributedCacheDirStatus = true;
+      }
+    }
+    if (!jobCacheDirStatus) {
+      throw new IOException("Not able to initialize job-cache directories "
+          + "in any of the configured local directories for user " + user);
+    }
+    if (!distributedCacheDirStatus) {
+      throw new IOException(
+          "Not able to initialize distributed-cache directories "
+              + "in any of the configured local directories for user "
+              + user);
+    }
+  }
+
+  /**
+   * Prepare the job directories for a given job. To be called by the job
+   * localization code, only if the job is not already localized.
+   * <br>
+   * Here, we set 700 permissions on the job directories created on all disks.
+   * We do this to avoid any misuse by other users until
+   * {@link TaskController#initializeJob} runs later and sets
+   * proper private permissions on the job directories. <br>
+   */
+  public void createJobDirs() throws IOException {
+    boolean initJobDirStatus = false;
+    for (Path localDir : localDirs) {
+      Path fullJobDir = new Path(localDir, JOBDIR);
+      if (lfs.exists(fullJobDir)) {
+        // this can happen after a partial execution of localizeJob: copying
+        // job.xml to the local disk may succeed while copying job.jar throws
+        // an exception. Clean up and then try again.
+        lfs.delete(fullJobDir, true);
+      }
+      // create $mapred.local.dir/taskTracker/$user/jobcache/$jobid
+      if (!lfs.mkdirs(fullJobDir, urwx)) {
+        LOG.warn("Not able to create job directory " + fullJobDir.toString());
+      } else {
+        initJobDirStatus = true;
+      }
+    }
+    if (!initJobDirStatus) {
+      throw new IOException("Not able to initialize job directories "
+          + "in any of the configured local directories for job "
+          + jobid.toString());
+    }
+  }
+
+  /**
+   * Create job log directory and set appropriate permissions for the directory.
+   */
+  public void initializeJobLogDir() throws IOException {
+    Path jobUserLogDir = new Path(TaskLog.getJobDir(jobid).toURI().toString());
+    if (!lfs.mkdirs(jobUserLogDir, urwx_gx)) {
+      throw new IOException(
+          "Could not create job user log directory: " + jobUserLogDir);
+    }
+  }
+
+  /**
+   * Download the job jar file from FS to the local file system and unjar it.
+   * Set the local jar file in the passed configuration.
+   *
+   * @param localJobConf
+   * @throws IOException
+   */
+  private void localizeJobJarFile(JobConf localJobConf) throws IOException {
+    // copy Jar file to the local FS and unjar it.
+    String jarFile = localJobConf.getJar();
+    FileStatus status = null;
+    long jarFileSize = -1;
+    if (jarFile != null) {
+      Path jarFilePath = new Path(jarFile);
+      FileSystem userFs = jarFilePath.getFileSystem(localJobConf);
+      try {
+        status = userFs.getFileStatus(jarFilePath);
+        jarFileSize = status.getLen();
+      } catch (FileNotFoundException fe) {
+        jarFileSize = -1;
+      }
+      // Reserve five times jarFileSize to leave room for unjarring the jar
+      // file into the jars directory
+      Path localJarFile =
+        lDirAlloc.getLocalPathForWrite(JARDST, 5 * jarFileSize, ttConf);
+
+      //Download job.jar
+      userFs.copyToLocalFile(jarFilePath, localJarFile);
+      localJobConf.setJar(localJarFile.toString());
+      // Also un-jar job.jar, so that classes inside sub-directories, e.g.
+      // lib/ and classes/, are available on the classpath
+      RunJar.unJar(new File(localJarFile.toString()),
+          new File(localJarFile.getParent().toString()));
+    }
+  }
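The 5x multiplier above is a free-space hint: the three-argument getLocalPathForWrite picks a directory from mapred.local.dir with at least that many bytes available. A sketch using the surrounding class's fields, with a made-up size:

    // a 10 MB job.jar asks the allocator for 50 MB of free space, so that
    // the jar plus its unjarred contents fit on the disk it picks
    long jarFileSize = 10L * 1024 * 1024;  // made-up size
    Path localJarFile =
      lDirAlloc.getLocalPathForWrite(JARDST, 5 * jarFileSize, ttConf);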
+
+  /**
+   * The permissions to use for the private distributed cache objects.
+   * They are already protected by the user directory, so give group and
+   * other identical bits; that lets LocalFileSystem fall back to the
+   * java.io.File methods when setting permissions.
+   */
+  private static final FsPermission privateCachePerms =
+    FsPermission.createImmutable((short) 0755);
+  
+  /**
+   * Given a list of objects, download each one.
+   * @param conf the job's configuration
+   * @param sources the list of source URIs to download from
+   * @param dests the list of paths to download them to
+   * @param times the desired modification times
+   * @param isPublic whether each object is in the public cache
+   * @param isArchive whether these are archive files
+   * @throws IOException
+   * @return the size of each downloaded object; entries for public objects,
+   *         which the TaskTracker downloads itself, are left as 0
+   */
+  private static long[] downloadPrivateCacheObjects(Configuration conf,
+                                             URI[] sources,
+                                             Path[] dests,
+                                             long[] times,
+                                             boolean[] isPublic,
+                                             boolean isArchive
+                                             ) throws IOException {
+    if (null == sources && null == dests && null == times && null == isPublic) {
+      return new long[0];
+    }
+    if (sources.length != dests.length ||
+        sources.length != times.length ||
+        sources.length != isPublic.length) {
+      throw new IOException("Distributed cache entry arrays have different " +
+                            "lengths: " + sources.length + ", " + dests.length +
+                            ", " + times.length + ", " + isPublic.length);
+    }
+    long[] result = new long[sources.length];
+    for(int i=0; i < sources.length; i++) {
+      // public objects are already downloaded by the Task Tracker, we
+      // only need to handle the private ones here
+      if (!isPublic[i]) {
+        result[i] = 
+          TrackerDistributedCacheManager.downloadCacheObject(conf, sources[i], 
+                                                             dests[i], 
+                                                             times[i], 
+                                                             isArchive, 
+                                                             privateCachePerms);
+      }
+    }
+    return result;
+  }
+
+  /**
+   * Download the parts of the distributed cache that are private.
+   * @param conf the job's configuration
+   * @throws IOException
+   * @return the size of the archive objects
+   */
+  public static long[] downloadPrivateCache(Configuration conf) throws IOException {
+    downloadPrivateCacheObjects(conf,
+                                DistributedCache.getCacheFiles(conf),
+                                DistributedCache.getLocalCacheFiles(conf),
+                                DistributedCache.getFileTimestamps(conf),
+                                TrackerDistributedCacheManager.
+                                  getFileVisibilities(conf),
+                                false);
+    return 
+      downloadPrivateCacheObjects(conf,
+                                  DistributedCache.getCacheArchives(conf),
+                                  DistributedCache.getLocalCacheArchives(conf),
+                                  DistributedCache.getArchiveTimestamps(conf),
+                                  TrackerDistributedCacheManager.
+                                    getArchiveVisibilities(conf),
+                                  true);
+  }
+
+  public void localizeJobFiles(JobID jobid, JobConf jConf,
+      Path localJobTokenFile, TaskUmbilicalProtocol taskTracker)
+      throws IOException {
+    localizeJobFiles(jobid, jConf,
+        lDirAlloc.getLocalPathForWrite(JOBCONF, ttConf), localJobTokenFile,
+        taskTracker);
+  }
+
+  public void localizeJobFiles(JobID jobid, JobConf jConf,
+      Path localJobFile, Path localJobTokenFile,
+      TaskUmbilicalProtocol taskTracker) throws IOException {
+    // Download the job.jar for this job from the system FS
+    localizeJobJarFile(jConf);
+
+    // set up the distributed-cache context; the private cache itself is
+    // downloaded once, below, after the config is fully updated
+    jConf.set(JOB_LOCAL_CTXT, ttConf.get(JOB_LOCAL_CTXT));
+
+    //update the config some more
+    jConf.set(TokenCache.JOB_TOKENS_FILENAME, localJobTokenFile.toString());
+    jConf.set(JobConf.MAPRED_LOCAL_DIR_PROPERTY, 
+        ttConf.get(JobConf.MAPRED_LOCAL_DIR_PROPERTY));
+    TaskTracker.resetNumTasksPerJvm(jConf);
+
+    long[] sizes = downloadPrivateCache(jConf);
+    taskTracker.updatePrivateDistributedCacheSizes(jobid, sizes);
+
+    // Create job-acls.xml file in job userlog dir and write the needed
+    // info for authorization of users for viewing task logs of this job.
+    writeJobACLs(jConf, new Path(TaskLog.getJobDir(jobid).toURI().toString()));
+
+    //write the updated jobConf file in the job directory
+    JobLocalizer.writeLocalJobFile(localJobFile, jConf);
+  }
+
+  /**
+   *  Creates job-acls.xml under the given directory logDir and writes
+   *  the job-view-acl, queue-admins-acl, job owner name, and queue name into
+   *  this file.
+   *  The queue name is the queue to which the job was submitted.
+   *  The queue-admins-acl is the administrators ACL of that queue.
+   * @param conf   job configuration
+   * @param logDir job userlog dir
+   * @throws IOException
+   */
+  private void writeJobACLs(JobConf conf, Path logDir) throws IOException {
+    JobConf aclConf = new JobConf(false);
+
+    // set the job view acl in aclConf
+    String jobViewACL = conf.get(JobContext.JOB_ACL_VIEW_JOB, " ");
+    aclConf.set(JobContext.JOB_ACL_VIEW_JOB, jobViewACL);
+
+    // set the job queue name in aclConf
+    String queue = conf.getQueueName();
+    aclConf.setQueueName(queue);
+
+    // set the queue admins acl in aclConf
+    String qACLName = QueueManager.toFullPropertyName(queue,
+        QueueACL.ADMINISTER_JOBS.getAclName());
+    String queueAdminsACL = conf.get(qACLName, " ");
+    aclConf.set(qACLName, queueAdminsACL);
+
+    // set jobOwner as user.name in aclConf
+    aclConf.set("user.name", user);
+
+    OutputStream out = null;
+    Path aclFile = new Path(logDir, TaskTracker.jobACLsFile);
+    try {
+      out = lfs.create(aclFile);
+      aclConf.writeXml(out);
+    } finally {
+      IOUtils.cleanup(LOG, out);
+    }
+    lfs.setPermission(aclFile, urw_gr);
+  }
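For reference, a sketch of how a consumer (for example, a task-log servlet) might read the file back using the same keys written above; the reader shown here is illustrative only, not an API introduced by this patch:

    // illustrative reader; path handling is an assumption
    JobConf readBack = new JobConf(false);
    readBack.addResource(new Path(logDir, TaskTracker.jobACLsFile));
    String viewAcl = readBack.get(JobContext.JOB_ACL_VIEW_JOB);
    String queue   = readBack.getQueueName();
    String owner   = readBack.get("user.name");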
+
+  public void createWorkDir(JobConf jConf) throws IOException {
+    // create $mapred.local.dir/taskTracker/$user/jobcache/$jobid/work
+    final Path workDir = lDirAlloc.getLocalPathForWrite(WORKDIR, ttConf);
+    if (!lfs.mkdirs(workDir)) {
+      throw new IOException("Mkdirs failed to create "
+          + workDir.toString());
+    }
+    jConf.set(TaskTracker.JOB_LOCAL_DIR, workDir.toUri().getPath());
+  }
+
+  public Path findCredentials() throws IOException {
+    return lDirAlloc.getLocalPathToRead(JOBTOKEN, ttConf);
+  }
+
+  public int runSetup(String user, String jobid, Path localJobTokenFile,
+                      TaskUmbilicalProtocol taskTracker) throws IOException {
+    // load user credentials, configuration
+    // ASSUME
+    // let $x = $mapred.local.dir
+    // forall $x, exists $x/$user
+    // exists $x/$user/jobcache/$jobid/job.xml
+    // exists $x/$user/jobcache/$jobid/jobToken
+    // exists $logdir/userlogs/$jobid
+    final Path localJobFile = lDirAlloc.getLocalPathToRead(JOBCONF, ttConf);
+    final JobConf cfgJob = new JobConf(localJobFile);
+    createWorkDir(ttConf);
+    localizeJobFiles(JobID.forName(jobid), cfgJob, localJobFile,
+        localJobTokenFile, taskTracker);
+
+    // $mapred.local.dir/taskTracker/$user/distcache
+    return 0;
+  }
+
+  public static void main(String[] argv)
+      throws IOException, InterruptedException {
+    // $logdir
+    // let $x = $root/tasktracker for some $mapred.local.dir
+    //   create $x/$user/jobcache/$jobid/work
+    //   fetch  $x/$user/jobcache/$jobid/jars/job.jar
+    //   setup  $x/$user/distcache
+    //   verify $x/distcache
+    //   write  $x/$user/jobcache/$jobid/job.xml
+    final String user = argv[0];
+    final String jobid = argv[1];
+    final InetSocketAddress ttAddr = 
+      new InetSocketAddress(argv[2], Integer.parseInt(argv[3]));
+    final String uid = UserGroupInformation.getCurrentUser().getShortUserName();
+    if (!user.equals(uid)) {
+      LOG.warn("Localization running as " + uid + " not " + user);
+    }
+
+    // Pull in user's tokens to complete setup
+    final JobConf conf = new JobConf();
+    final JobLocalizer localizer =
+      new JobLocalizer(conf, user, jobid);
+    final Path jobTokenFile = localizer.findCredentials();
+    final Credentials creds = TokenCache.loadTokens(
+        jobTokenFile.toUri().toString(), conf);
+    LOG.debug("Loaded tokens from " + jobTokenFile);
+    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
+    for (Token<? extends TokenIdentifier> token : creds.getAllTokens()) {
+      ugi.addToken(token);
+    }
+    final TaskUmbilicalProtocol taskTracker =
+      (TaskUmbilicalProtocol) RPC.getProxy(TaskUmbilicalProtocol.class,
+                                           TaskUmbilicalProtocol.versionID,
+                                           ttAddr, conf);
+    System.exit(
+      ugi.doAs(new PrivilegedExceptionAction<Integer>() {
+        public Integer run() {
+          try {
+            return localizer.runSetup(user, jobid, jobTokenFile, taskTracker);
+          } catch (Throwable e) {
+            e.printStackTrace(System.out);
+            return -1;
+          }
+        }
+      }));
+  }
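The argv contract of main, as read by the code above: argv[0] is the user, argv[1] the job id, and argv[2]/argv[3] the TaskTracker's reporting host and port. A hypothetical invocation (all values made up; in this patch the real command line is composed by LinuxTaskController.initializeJob, later in this diff):

    String[] argv = {
      "alice",                  // user to localize as
      "job_201009162306_0001",  // job id
      "tt-host.example.com",    // TaskTracker reporting host
      "50060"                   // TaskTracker reporting port
    };
    JobLocalizer.main(argv);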
+
+  /**
+   * Write the task specific job-configuration file.
+   * @throws IOException
+   */
+  public static void writeLocalJobFile(Path jobFile, JobConf conf)
+      throws IOException {
+    FileSystem localFs = FileSystem.getLocal(conf);
+    localFs.delete(jobFile, true);
+    OutputStream out = null;
+    try {
+      out = FileSystem.create(localFs, jobFile, urw_gr);
+      conf.writeXml(out);
+    } finally {
+      IOUtils.cleanup(LOG, out);
+    }
+  }
+
+}

+ 110 - 79
src/mapred/org/apache/hadoop/mapred/JvmManager.java

@@ -30,12 +30,12 @@ import java.util.Vector;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.TaskController.TaskControllerContext;
 import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.server.tasktracker.JVMInfo;
 import org.apache.hadoop.mapreduce.server.tasktracker.userlogs.JvmFinishedEvent;
 import org.apache.hadoop.util.ProcessTree;
+import org.apache.hadoop.util.ProcessTree.Signal;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 
 class JvmManager {
@@ -49,8 +49,8 @@ class JvmManager {
   
   public JvmEnv constructJvmEnv(List<String> setup, Vector<String>vargs,
       File stdout,File stderr,long logSize, File workDir, 
-      Map<String,String> env, JobConf conf) {
-    return new JvmEnv(setup,vargs,stdout,stderr,logSize,workDir,env,conf);
+      JobConf conf) {
+    return new JvmEnv(setup,vargs,stdout,stderr,logSize,workDir,conf);
   }
   
   public JvmManager(TaskTracker tracker) {
@@ -103,7 +103,7 @@ class JvmManager {
   }
   
   
-  public void stop() {
+  public void stop() throws IOException, InterruptedException {
     mapJvmManager.stop();
     reduceJvmManager.stop();
   }
@@ -116,7 +116,8 @@ class JvmManager {
     }
   }
 
-  public void launchJvm(TaskRunner t, JvmEnv env) {
+  public void launchJvm(TaskRunner t, JvmEnv env
+                        ) throws IOException, InterruptedException {
     if (t.getTask().isMapTask()) {
       mapJvmManager.reapJvm(t, env);
     } else {
@@ -140,7 +141,8 @@ class JvmManager {
     }
   }
 
-  public void taskKilled(TaskRunner tr) {
+  public void taskKilled(TaskRunner tr
+                         ) throws IOException, InterruptedException {
     if (tr.getTask().isMapTask()) {
       mapJvmManager.taskKilled(tr);
     } else {
@@ -148,7 +150,7 @@ class JvmManager {
     }
   }
 
-  public void killJvm(JVMId jvmId) {
+  public void killJvm(JVMId jvmId) throws IOException, InterruptedException {
     if (jvmId.isMap) {
       mapJvmManager.killJvm(jvmId);
     } else {
@@ -161,15 +163,19 @@ class JvmManager {
    * asynchronous deletion of work dir.
    * @param tracker taskTracker
    * @param task    the task whose work dir needs to be deleted
-   * @throws IOException
    */
-  static void deleteWorkDir(TaskTracker tracker, Task task) throws IOException {
+  static void deleteWorkDir(TaskTracker tracker, Task task) {
+    String user = task.getUser();
+    String jobid = task.getJobID().toString();
+    String taskid = task.getTaskID().toString();
+    String workDir = TaskTracker.getTaskWorkDir(user, jobid, taskid, 
+                                                task.isTaskCleanupTask());
+    String userDir = TaskTracker.getUserDir(user);
     tracker.getCleanupThread().addToQueue(
-        TaskTracker.buildTaskControllerTaskPathDeletionContexts(
-          tracker.getLocalFileSystem(),
-          tracker.getLocalFiles(tracker.getJobConf(), ""),
-          task, true /* workDir */,
-          tracker.getTaskController()));
+     new TaskController.DeletionContext(tracker.getTaskController(), false,
+                                        user, 
+                                        workDir.substring(userDir.length())));
+                                           
   }
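The substring arithmetic above hands the DeletionContext a path relative to the user directory, which the task-controller resolves under each mapred.local.dir; a sketch with made-up ids:

    // assuming TaskTracker.getUserDir("alice") resolves to "taskTracker/alice"
    String userDir = "taskTracker/alice";
    String workDir =
      "taskTracker/alice/jobcache/job_0001/attempt_0001_m_000000_0/work";
    String relative = workDir.substring(userDir.length());
    // relative == "/jobcache/job_0001/attempt_0001_m_000000_0/work"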
 
   static class JvmManagerForType {
@@ -188,8 +194,13 @@ class JvmManager {
     
     int maxJvms;
     boolean isMap;
+    private final long sleeptimeBeforeSigkill;
     
     Random rand = new Random(System.currentTimeMillis());
+    private static final String DELAY_BEFORE_KILL_KEY =
+      "mapred.tasktracker.tasks.sleeptime-before-sigkill";
+    // number of milliseconds to wait between TERM and KILL.
+    private static final long DEFAULT_SLEEPTIME_BEFORE_SIGKILL = 250;
     private TaskTracker tracker;
 
     public JvmManagerForType(int maxJvms, boolean isMap, 
@@ -197,6 +208,9 @@ class JvmManager {
       this.maxJvms = maxJvms;
       this.isMap = isMap;
       this.tracker = tracker;
+      sleeptimeBeforeSigkill =
+        tracker.getJobConf().getLong(DELAY_BEFORE_KILL_KEY,
+                                     DEFAULT_SLEEPTIME_BEFORE_SIGKILL);
     }
 
     synchronized public void setRunningTaskForJvm(JVMId jvmId, 
@@ -216,25 +230,6 @@ class JvmManager {
         JvmRunner jvmRunner = jvmIdToRunner.get(jvmId);
         Task task = taskRunner.getTaskInProgress().getTask();
 
-        // Initialize task dirs
-        TaskControllerContext context =
-            new TaskController.TaskControllerContext();
-        context.env = jvmRunner.env;
-        context.task = task;
-        // If we are returning the same task as which the JVM was launched
-        // we don't initialize task once again.
-        if (!jvmRunner.env.conf.get("mapred.task.id").equals(
-            task.getTaskID().toString())) {
-          try {
-            tracker.getTaskController().initializeTask(context);
-          } catch (IOException e) {
-            LOG.warn("Failed to initialize the new task "
-                + task.getTaskID().toString() + " to be given to JVM with id "
-                + jvmId);
-            throw e;
-          }
-        }
-
         jvmRunner.taskGiven(task);
         return taskRunner.getTaskInProgress();
 
@@ -257,7 +252,9 @@ class JvmManager {
       }
     }
 
-    synchronized public void taskKilled(TaskRunner tr) {
+    synchronized public void taskKilled(TaskRunner tr
+                                        ) throws IOException,
+                                                 InterruptedException {
       JVMId jvmId = runningTaskToJvm.remove(tr);
       if (jvmId != null) {
         jvmToRunningTask.remove(jvmId);
@@ -265,14 +262,15 @@ class JvmManager {
       }
     }
 
-    synchronized public void killJvm(JVMId jvmId) {
+    synchronized public void killJvm(JVMId jvmId) throws IOException, 
+                                                         InterruptedException {
       JvmRunner jvmRunner;
       if ((jvmRunner = jvmIdToRunner.get(jvmId)) != null) {
         killJvmRunner(jvmRunner);
       }
     }
     
-    synchronized public void stop() {
+    synchronized public void stop() throws IOException, InterruptedException {
       //since the kill() method invoked later on would remove
       //an entry from the jvmIdToRunner map, we create a
       //copy of the values and iterate over it (if we don't
@@ -285,7 +283,9 @@ class JvmManager {
       }
     }
 
-    private synchronized void killJvmRunner(JvmRunner jvmRunner) {
+    private synchronized void killJvmRunner(JvmRunner jvmRunner
+                                            ) throws IOException,
+                                                     InterruptedException {
       jvmRunner.kill();
       removeJvm(jvmRunner.jvmId);
     }
@@ -295,7 +295,7 @@ class JvmManager {
       jvmIdToPid.remove(jvmId);
     }
     private synchronized void reapJvm( 
-        TaskRunner t, JvmEnv env) {
+        TaskRunner t, JvmEnv env) throws IOException, InterruptedException {
       if (t.getTaskInProgress().wasKilled()) {
         //the task was killed in-flight
         //no need to do the rest of the operations
@@ -384,7 +384,7 @@ class JvmManager {
 
     private void spawnNewJvm(JobID jobId, JvmEnv env,  
         TaskRunner t) {
-      JvmRunner jvmRunner = new JvmRunner(env,jobId);
+      JvmRunner jvmRunner = new JvmRunner(env, jobId, t.getTask());
       jvmIdToRunner.put(jvmRunner.jvmId, jvmRunner);
       //spawn the JVM in a new thread. Note that there will be very little
       //extra overhead of launching the new thread for a new JVM since
@@ -420,8 +420,7 @@ class JvmManager {
       JVMId jvmId;
       volatile boolean busy = true;
       private ShellCommandExecutor shexec; // shell terminal for running the task
-      //context used for starting JVM
-      private TaskControllerContext initalContext;
+      private Task firstTask;
 
       private List<Task> tasksGiven = new ArrayList<Task>();
 
@@ -429,65 +428,99 @@ class JvmManager {
         tasksGiven.add(task);
       }
 
-      public JvmRunner(JvmEnv env, JobID jobId) {
+      public JvmRunner(JvmEnv env, JobID jobId, Task firstTask) {
         this.env = env;
         this.jvmId = new JVMId(jobId, isMap, rand.nextInt());
         this.numTasksToRun = env.conf.getNumTasksToExecutePerJvm();
+        this.firstTask = firstTask;
         LOG.info("In JvmRunner constructed JVM ID: " + jvmId);
       }
+
+      @Override
       public void run() {
-        runChild(env);
-        jvmFinished();
+        try {
+          runChild(env);
+        } catch (InterruptedException ie) {
+          return;
+        } catch (IOException e) {
+          LOG.warn("Caught IOException in JVMRunner", e);
+        } catch (Throwable e) {
+          LOG.error("Caught Throwable in JVMRunner. Aborting TaskTracker.", e);
+          System.exit(1);
+        } finally {
+          jvmFinished();
+        }
       }
 
-      public void runChild(JvmEnv env) {
-        initalContext = new TaskControllerContext();
+      public void runChild(JvmEnv env) throws IOException, InterruptedException{
+        int exitCode = 0;
         try {
           env.vargs.add(Integer.toString(jvmId.getId()));
           //Launch the task controller to run task JVM
-          initalContext.task = jvmToRunningTask.get(jvmId).getTask();
-          initalContext.env = env;
-          tracker.getTaskController().launchTaskJVM(initalContext);
+          String user = jvmToRunningTask.get(jvmId).getTask().getUser();
+          TaskAttemptID taskAttemptId = 
+            jvmToRunningTask.get(jvmId).getTask().getTaskID();
+          String taskAttemptIdStr = 
+            jvmToRunningTask.get(jvmId).getTask().isTaskCleanupTask() ? 
+                (taskAttemptId.toString() + TaskTracker.TASK_CLEANUP_SUFFIX) :
+                  taskAttemptId.toString(); 
+          exitCode = tracker.getTaskController().launchTask(user,
+              jvmId.jobId.toString(), taskAttemptIdStr, env.setup,
+              env.vargs, env.workDir, env.stdout.toString(),
+              env.stderr.toString());
         } catch (IOException ioe) {
           // do nothing
           // error and output are appropriately redirected
         } finally { // handle the exit code
-          shexec = initalContext.shExec;
-          if (shexec == null) {
-            return;
-          }
-
+          // although the process has exited before we get here,
+          // make sure the entire process group has also been killed.
           kill();
-
-          int exitCode = shexec.getExitCode();
           updateOnJvmExit(jvmId, exitCode);
           LOG.info("JVM : " + jvmId + " exited with exit code " + exitCode
               + ". Number of tasks it ran: " + numTasksRan);
+          deleteWorkDir(tracker, firstTask);
+        }
+      }
+
+      private class DelayedProcessKiller extends Thread {
+        private final String user;
+        private final int pid;
+        private final long delay;
+        private final Signal signal;
+        DelayedProcessKiller(String user, int pid, long delay, Signal signal) {
+          this.user = user;
+          this.pid = pid;
+          this.delay = delay;
+          this.signal = signal;
+          setName("Task killer for " + pid);
+          setDaemon(false);
+        }
+        @Override
+        public void run() {
           try {
-            // In case of jvm-reuse,
-            //the task jvm cleans up the common workdir for every 
-            //task at the beginning of each task in the task JVM.
-            //For the last task, we do it here.
-            if (env.conf.getNumTasksToExecutePerJvm() != 1) {
-              deleteWorkDir(tracker, initalContext.task);
-            }
-          } catch (IOException ie){}
+            Thread.sleep(delay);
+            tracker.getTaskController().signalTask(user, pid, signal);
+          } catch (InterruptedException e) {
+            return;
+          } catch (IOException e) {
+            LOG.warn("Exception when killing task " + pid, e);
+          }
         }
       }
 
-      synchronized void kill() {
+      synchronized void kill() throws IOException, InterruptedException {
         if (!killed) {
           TaskController controller = tracker.getTaskController();
          // Check the pid before issuing a kill, to prevent situations
           // where kill is issued before task is launched.
-          if (initalContext != null && initalContext.env != null) {
-            initalContext.pid = jvmIdToPid.get(jvmId);
-            initalContext.sleeptimeBeforeSigkill = tracker.getJobConf()
-              .getLong("mapred.tasktracker.tasks.sleeptime-before-sigkill",
-                  ProcessTree.DEFAULT_SLEEPTIME_BEFORE_SIGKILL);
-
-            // Destroy the task jvm
-            controller.destroyTaskJVM(initalContext);
+          String pidStr = jvmIdToPid.get(jvmId);
+          if (pidStr != null) {
+            String user = env.conf.getUser();
+            int pid = Integer.parseInt(pidStr);
+            // start a thread that will kill the process dead
+            //new DelayedProcessKiller(user, pid, sleeptimeBeforeSigkill, 
+            //                         Signal.KILL).start();
+            controller.signalTask(user, pid, Signal.KILL);
           } else {
             LOG.info(String.format("JVM Not killed %s but just removed", jvmId
                 .toString()));
@@ -498,9 +531,9 @@ class JvmManager {
 
       // Post-JVM-exit logs processing. inform user log manager
       private void jvmFinished() {
-        Task firstTask = initalContext.task;
         JvmFinishedEvent jfe = new JvmFinishedEvent(new JVMInfo(
-            TaskLog.getAttemptDir(firstTask.getTaskID(), firstTask.isTaskCleanupTask()),
+            TaskLog.getAttemptDir(firstTask.getTaskID(), 
+                                  firstTask.isTaskCleanupTask()),
             tasksGiven));
         tracker.getUserLogManager().addLogEvent(jfe);
       }
@@ -531,15 +564,13 @@ class JvmManager {
     JobConf conf;
     Map<String, String> env;
 
-    public JvmEnv(List<String> setup, Vector<String> vargs, File stdout, 
-        File stderr, long logSize, File workDir, Map<String,String> env,
-        JobConf conf) {
+    public JvmEnv(List <String> setup, Vector<String> vargs, File stdout, 
+        File stderr, long logSize, File workDir, JobConf conf) {
       this.setup = setup;
       this.vargs = vargs;
       this.stdout = stdout;
       this.stderr = stderr;
       this.workDir = workDir;
-      this.env = env;
       this.conf = conf;
     }
   }
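The DelayedProcessKiller added above encodes a TERM-then-KILL escalation: signal the process politely, then send KILL after sleeptimeBeforeSigkill (250 ms by default) if it lingers. A sketch of the two-step pattern the class is built for, assuming the surrounding JvmRunner fields; note the committed kill() currently signals KILL immediately and leaves the escalating call commented out:

    controller.signalTask(user, pid, Signal.TERM);
    new DelayedProcessKiller(user, pid, sleeptimeBeforeSigkill,
                             Signal.KILL).start();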

+ 170 - 494
src/mapred/org/apache/hadoop/mapred/LinuxTaskController.java

@@ -17,27 +17,23 @@
 */
 package org.apache.hadoop.mapred;
 
-import java.io.BufferedWriter;
 import java.io.File;
-import java.io.FileWriter;
 import java.io.IOException;
-import java.io.PrintWriter;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
-import java.util.Map;
-import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalFileSystem;
-import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext;
-import org.apache.hadoop.mapred.JvmManager.JvmEnv;
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.ProcessTree.Signal;
 import org.apache.hadoop.util.Shell.ExitCodeException;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.util.StringUtils;
 
 /**
  * A {@link TaskController} that runs the task JVMs as the user 
@@ -62,22 +58,22 @@ class LinuxTaskController extends TaskController {
 
   private static final Log LOG = 
             LogFactory.getLog(LinuxTaskController.class);
-
-  // Name of the executable script that will contain the child
-  // JVM command line. See writeCommand for details.
-  private static final String COMMAND_FILE = "taskjvm.sh";
   
   // Path to the setuid executable.
-  private static String taskControllerExe;
+  private String taskControllerExe;
+  private static final String TASK_CONTROLLER_EXEC_KEY =
+    "mapreduce.tasktracker.task-controller.exe";
   
-  static {
-    // the task-controller is expected to be under the $HADOOP_HOME/bin
-    // directory.
+  @Override
+  public void setConf(Configuration conf) {
+    super.setConf(conf);
     File hadoopBin = new File(System.getenv("HADOOP_HOME"), "bin");
-    taskControllerExe = 
+    String defaultTaskController = 
         new File(hadoopBin, "task-controller").getAbsolutePath();
+    taskControllerExe = conf.get(TASK_CONTROLLER_EXEC_KEY, 
+                                 defaultTaskController);       
   }
-  
+
   public LinuxTaskController() {
     super();
   }
@@ -85,27 +81,49 @@ class LinuxTaskController extends TaskController {
   /**
    * List of commands that the setuid script will execute.
    */
-  enum TaskControllerCommands {
-    INITIALIZE_USER,
-    INITIALIZE_JOB,
-    INITIALIZE_DISTRIBUTEDCACHE_FILE,
-    LAUNCH_TASK_JVM,
-    INITIALIZE_TASK,
-    TERMINATE_TASK_JVM,
-    KILL_TASK_JVM,
-    ENABLE_TASK_FOR_CLEANUP,
-    ENABLE_JOB_FOR_CLEANUP
+  enum Commands {
+    INITIALIZE_JOB(0),
+    LAUNCH_TASK_JVM(1),
+    SIGNAL_TASK(2),
+    DELETE_AS_USER(3),
+    DELETE_LOG_AS_USER(4);
+
+    private int value;
+    Commands(int value) {
+      this.value = value;
+    }
+    int getValue() {
+      return value;
+    }
+  }
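These integer codes are the command-line protocol of the setuid binary: argv[1] carries the user, argv[2] the command code, and the remainder is command-specific, as the overrides later in this file show. For example, a signal request is composed roughly as (values made up; see the signalTask override below for the real call site):

    String[] cmd = {
      taskControllerExe,                                  // setuid binary
      "alice",                                            // user to act as
      Integer.toString(Commands.SIGNAL_TASK.getValue()),  // command code "2"
      "4242",                                             // task pid
      Integer.toString(Signal.KILL.getValue())            // signal number
    };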
+
+  /**
+   * Result codes returned from the C task-controller.
+   * These must match the values in task-controller.h.
+   */
+  enum ResultCode {
+    OK(0),
+    INVALID_USER_NAME(2),
+    INVALID_TASK_PID(9),
+    INVALID_CONFIG_FILE(24);
+
+    private final int value;
+    ResultCode(int value) {
+      this.value = value;
+    }
+    int getValue() {
+      return value;
+    }
   }
 
   @Override
-  public void setup() throws IOException {
-    super.setup();
+  public void setup(LocalDirAllocator allocator) throws IOException {
 
     // Check the permissions of the task-controller binary by running it plainly.
    // If permissions are correct, it exits with error code 1; otherwise it
    // exits with 24 (or another code) indicating the binary is misconfigured.
     String[] taskControllerCmd =
-        new String[] { getTaskControllerExecutablePath() };
+        new String[] { taskControllerExe };
     ShellCommandExecutor shExec = new ShellCommandExecutor(taskControllerCmd);
     try {
       shExec.execute();
@@ -118,500 +136,158 @@ class LinuxTaskController extends TaskController {
           + "permissions/ownership with exit code " + exitCode, e);
       }
     }
+    this.allocator = allocator;
   }
+  
 
-  /**
-   * Launch a task JVM that will run as the owner of the job.
-   * 
-   * This method launches a task JVM by executing a setuid executable that will
-   * switch to the user and run the task. Also does initialization of the first
-   * task in the same setuid process launch.
-   */
   @Override
-  void launchTaskJVM(TaskController.TaskControllerContext context) 
-                                        throws IOException {
-    JvmEnv env = context.env;
-    // get the JVM command line.
-    String cmdLine = 
-      TaskLog.buildCommandLine(env.setup, env.vargs, env.stdout, env.stderr,
-          env.logSize, true);
-
-    StringBuffer sb = new StringBuffer();
-    //export out all the environment variable before child command as
-    //the setuid/setgid binaries would not be getting, any environmental
-    //variables which begin with LD_*.
-    for(Entry<String, String> entry : env.env.entrySet()) {
-      sb.append("export ");
-      sb.append(entry.getKey());
-      sb.append("=");
-      sb.append(entry.getValue());
-      sb.append("\n");
+  public void initializeJob(String user, String jobid, Path credentials,
+                            Path jobConf, TaskUmbilicalProtocol taskTracker
+                            ) throws IOException {
+    List<String> command = new ArrayList<String>(
+      Arrays.asList(taskControllerExe, 
+                    user, 
+                    Integer.toString(Commands.INITIALIZE_JOB.getValue()),
+                    jobid,
+                    credentials.toUri().getPath().toString(),
+                    jobConf.toUri().getPath().toString()));
+    File jvm =                                  // use same jvm as parent
+      new File(new File(System.getProperty("java.home"), "bin"), "java");
+    command.add(jvm.toString());
+    command.add("-classpath");
+    command.add(System.getProperty("java.class.path"));
+    command.add("-Dhadoop.log.dir=" + TaskLog.getBaseLogDir());
+    command.add("-Dhadoop.root.logger=INFO,console");
+    command.add(JobLocalizer.class.getName());  // main of JobLocalizer
+    command.add(user);
+    command.add(jobid);
+    // add the task tracker's reporting address
+    InetSocketAddress ttAddr = 
+      ((TaskTracker) taskTracker).getTaskTrackerReportAddress();
+    command.add(ttAddr.getHostName());
+    command.add(Integer.toString(ttAddr.getPort()));
+    String[] commandArray = command.toArray(new String[0]);
+    ShellCommandExecutor shExec = new ShellCommandExecutor(commandArray);
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("initializeJob: " + Arrays.toString(commandArray));
     }
-    sb.append(cmdLine);
-    // write the command to a file in the
-    // task specific cache directory
-    writeCommand(sb.toString(), getTaskCacheDirectory(context));
-    
-    // Call the taskcontroller with the right parameters.
-    List<String> launchTaskJVMArgs = buildLaunchTaskArgs(context);
-    ShellCommandExecutor shExec =  buildTaskControllerExecutor(
-                                    TaskControllerCommands.LAUNCH_TASK_JVM, 
-                                    env.conf.getUser(),
-                                    launchTaskJVMArgs, env.workDir, env.env);
-    context.shExec = shExec;
     try {
       shExec.execute();
-    } catch (Exception e) {
-      int exitCode = shExec.getExitCode();
-      LOG.warn("Exit code from task is : " + exitCode);
-      // 143 (SIGTERM) and 137 (SIGKILL) exit codes means the task was
-      // terminated/killed forcefully. In all other cases, log the
-      // task-controller output
-      if (exitCode != 143 && exitCode != 137) {
-        LOG.warn("Exception thrown while launching task JVM : "
-            + StringUtils.stringifyException(e));
-        LOG.info("Output from LinuxTaskController's launchTaskJVM follows:");
+      if (LOG.isDebugEnabled()) {
         logOutput(shExec.getOutput());
       }
-      throw new IOException(e);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.info("Output from LinuxTaskController's launchTaskJVM follows:");
-      logOutput(shExec.getOutput());
-    }
-  }
-
-  /**
-   * Helper method that runs a LinuxTaskController command
-   * 
-   * @param taskControllerCommand
-   * @param user
-   * @param cmdArgs
-   * @param env
-   * @throws IOException
-   */
-  private void runCommand(TaskControllerCommands taskControllerCommand, 
-      String user, List<String> cmdArgs, File workDir, Map<String, String> env)
-      throws IOException {
-
-    ShellCommandExecutor shExec =
-        buildTaskControllerExecutor(taskControllerCommand, user, cmdArgs, 
-                                    workDir, env);
-    try {
-      shExec.execute();
-    } catch (Exception e) {
-      LOG.warn("Exit code from " + taskControllerCommand.toString() + " is : "
-          + shExec.getExitCode());
-      LOG.warn("Exception thrown by " + taskControllerCommand.toString() + " : "
-          + StringUtils.stringifyException(e));
-      LOG.info("Output from LinuxTaskController's " 
-               + taskControllerCommand.toString() + " follows:");
-      logOutput(shExec.getOutput());
-      throw new IOException(e);
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.info("Output from LinuxTaskController's " 
-               + taskControllerCommand.toString() + " follows:");
+    } catch (ExitCodeException e) {
+      int exitCode = shExec.getExitCode();
       logOutput(shExec.getOutput());
+      throw new IOException("Job initialization failed (" + exitCode + ")", e);
     }
   }
 
-  /**
-   * Returns list of arguments to be passed while initializing a new task. See
-   * {@code buildTaskControllerExecutor(TaskControllerCommands, String, 
-   * List<String>, JvmEnv)} documentation.
-   * 
-   * @param context
-   * @return Argument to be used while launching Task VM
-   */
-  private List<String> buildInitializeTaskArgs(TaskControllerContext context) {
-    List<String> commandArgs = new ArrayList<String>(3);
-    String taskId = context.task.getTaskID().toString();
-    String jobId = getJobId(context);
-    commandArgs.add(jobId);
-    if (!context.task.isTaskCleanupTask()) {
-      commandArgs.add(taskId);
-    } else {
-      commandArgs.add(taskId + TaskTracker.TASK_CLEANUP_SUFFIX);
-    }
-    return commandArgs;
-  }
-
   @Override
-  void initializeTask(TaskControllerContext context)
-      throws IOException {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Going to do " 
-                + TaskControllerCommands.INITIALIZE_TASK.toString()
-                + " for " + context.task.getTaskID().toString());
-    }
-    runCommand(TaskControllerCommands.INITIALIZE_TASK, 
-        context.env.conf.getUser(),
-        buildInitializeTaskArgs(context), context.env.workDir, context.env.env);
-  }
-
-  /**
-   * Builds the args to be passed to task-controller for enabling of task for
-   * cleanup. Last arg in this List is either $attemptId or $attemptId/work
-   */
-  private List<String> buildTaskCleanupArgs(
-      TaskControllerTaskPathDeletionContext context) {
-    List<String> commandArgs = new ArrayList<String>(3);
-    commandArgs.add(context.mapredLocalDir.toUri().getPath());
-    commandArgs.add(context.task.getJobID().toString());
-
-    String workDir = "";
-    if (context.isWorkDir) {
-      workDir = "/work";
-    }
-    if (context.task.isTaskCleanupTask()) {
-      commandArgs.add(context.task.getTaskID() + TaskTracker.TASK_CLEANUP_SUFFIX
-                      + workDir);
-    } else {
-      commandArgs.add(context.task.getTaskID() + workDir);
-    }
+  public int launchTask(String user, 
+                                  String jobId,
+                                  String attemptId,
+                                  List<String> setup,
+                                  List<String> jvmArguments,
+                                  File currentWorkDirectory,
+                                  String stdout,
+                                  String stderr) throws IOException {
 
-    return commandArgs;
-  }
+    ShellCommandExecutor shExec = null;
+    try {
+      FileSystem rawFs = FileSystem.getLocal(getConf()).getRaw();
+      long logSize = 0; //TODO, Ref BUG:2854624
+      // get the JVM command line.
+      String cmdLine = 
+        TaskLog.buildCommandLine(setup, jvmArguments,
+            new File(stdout), new File(stderr), logSize, true);
 
-  /**
-   * Builds the args to be passed to task-controller for enabling of job for
-   * cleanup. Last arg in this List is $jobid.
-   */
-  private List<String> buildJobCleanupArgs(
-      TaskControllerJobPathDeletionContext context) {
-    List<String> commandArgs = new ArrayList<String>(2);
-    commandArgs.add(context.mapredLocalDir.toUri().getPath());
-    commandArgs.add(context.jobId.toString());
+      // write the command to a file in the
+      // task specific cache directory
+      Path p = new Path(allocator.getLocalPathForWrite(
+          TaskTracker.getPrivateDirTaskScriptLocation(user, jobId, attemptId),
+          getConf()), COMMAND_FILE);
+      String commandFile = writeCommand(cmdLine, rawFs, p); 
 
-    return commandArgs;
-  }
-  
-  /**
-   * Enables the task for cleanup by changing permissions of the specified path
-   * in the local filesystem
-   */
-  @Override
-  void enableTaskForCleanup(PathDeletionContext context)
-      throws IOException {
-    if (context instanceof TaskControllerTaskPathDeletionContext) {
-      TaskControllerTaskPathDeletionContext tContext =
-        (TaskControllerTaskPathDeletionContext) context;
-      enablePathForCleanup(tContext, 
-                           TaskControllerCommands.ENABLE_TASK_FOR_CLEANUP,
-                           buildTaskCleanupArgs(tContext));
-    }
-    else {
-      throw new IllegalArgumentException("PathDeletionContext provided is not "
-          + "TaskControllerTaskPathDeletionContext.");
-    }
-  }
+      String[] command = 
+        new String[]{taskControllerExe, 
+          user,
+          Integer.toString(Commands.LAUNCH_TASK_JVM.getValue()),
+          jobId,
+          attemptId,
+          currentWorkDirectory.toString(),
+          commandFile};
+      shExec = new ShellCommandExecutor(command);
 
-  /**
-   * Enables the job for cleanup by changing permissions of the specified path
-   * in the local filesystem
-   */
-  @Override
-  void enableJobForCleanup(PathDeletionContext context)
-      throws IOException {
-    if (context instanceof TaskControllerJobPathDeletionContext) {
-      TaskControllerJobPathDeletionContext tContext =
-        (TaskControllerJobPathDeletionContext) context;
-      enablePathForCleanup(tContext, 
-                           TaskControllerCommands.ENABLE_JOB_FOR_CLEANUP,
-                           buildJobCleanupArgs(tContext));
-    } else {
-      throw new IllegalArgumentException("PathDeletionContext provided is not "
-                  + "TaskControllerJobPathDeletionContext.");
-    }
-  }
-  
-  /**
-   * Enable a path for cleanup
-   * @param c {@link TaskControllerPathDeletionContext} for the path to be 
-   *          cleaned up
-   * @param command {@link TaskControllerCommands} for task/job cleanup
-   * @param cleanupArgs arguments for the {@link LinuxTaskController} to enable 
-   *                    path cleanup
-   */
-  private void enablePathForCleanup(TaskControllerPathDeletionContext c,
-                                    TaskControllerCommands command,
-                                    List<String> cleanupArgs) {
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Going to do " + command.toString() + " for " + c.fullPath);
-    }
-
-    if ( c.user != null && c.fs instanceof LocalFileSystem) {
-      try {
-        runCommand(command, c.user, cleanupArgs, null, null);
-      } catch(IOException e) {
-        LOG.warn("Unable to change permissions for " + c.fullPath);
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("launchTask: " + Arrays.toString(command));
       }
-    }
-    else {
-      throw new IllegalArgumentException("Either user is null or the " 
-                  + "file system is not local file system.");
-    }
-  }
-
-  private void logOutput(String output) {
-    String shExecOutput = output;
-    if (shExecOutput != null) {
-      for (String str : shExecOutput.split("\n")) {
-        LOG.info(str);
+      shExec.execute();
+    } catch (Exception e) {
+      if (shExec == null) {
+        return -1;
       }
-    }
-  }
-
-  private String getJobId(TaskControllerContext context) {
-    String taskId = context.task.getTaskID().toString();
-    TaskAttemptID tId = TaskAttemptID.forName(taskId);
-    String jobId = tId.getJobID().toString();
-    return jobId;
-  }
-
-  /**
-   * Returns list of arguments to be passed while launching task VM.
-   * See {@code buildTaskControllerExecutor(TaskControllerCommands, 
-   * String, List<String>, JvmEnv)} documentation.
-   * @param context
-   * @return Argument to be used while launching Task VM
-   */
-  private List<String> buildLaunchTaskArgs(TaskControllerContext context) {
-    List<String> commandArgs = new ArrayList<String>(3);
-    LOG.debug("getting the task directory as: " 
-        + getTaskCacheDirectory(context));
-    LOG.debug("getting the tt_root as " +getDirectoryChosenForTask(
-        new File(getTaskCacheDirectory(context)), 
-        context) );
-    commandArgs.add(getDirectoryChosenForTask(
-        new File(getTaskCacheDirectory(context)), 
-        context));
-    commandArgs.addAll(buildInitializeTaskArgs(context));
-    return commandArgs;
-  }
-
-  // Get the directory from the list of directories configured
-  // in mapred.local.dir chosen for storing data pertaining to
-  // this task.
-  private String getDirectoryChosenForTask(File directory,
-      TaskControllerContext context) {
-    String jobId = getJobId(context);
-    String taskId = context.task.getTaskID().toString();
-    for (String dir : mapredLocalDirs) {
-      File mapredDir = new File(dir);
-      File taskDir =
-          new File(mapredDir, TaskTracker.getTaskWorkDir(context.task
-              .getUser(), jobId, taskId, context.task.isTaskCleanupTask()))
-              .getParentFile();
-      if (directory.equals(taskDir)) {
-        return dir;
+      int exitCode = shExec.getExitCode();
+      LOG.warn("Exit code from task is : " + exitCode);
+      // 143 (SIGTERM) and 137 (SIGKILL) exit codes means the task was
+      // terminated/killed forcefully. In all other cases, log the
+      // task-controller output
+      if (exitCode != 143 && exitCode != 137) {
+        LOG.warn("Exception thrown while launching task JVM : "
+            + StringUtils.stringifyException(e));
+        LOG.info("Output from LinuxTaskController's launchTaskJVM follows:");
+        logOutput(shExec.getOutput());
       }
+      return exitCode;
     }
-    
-    LOG.error("Couldn't parse task cache directory correctly");
-    throw new IllegalArgumentException("invalid task cache directory "
-                + directory.getAbsolutePath());
-  }
-
-  @Override
-  public void initializeDistributedCacheFile(DistributedCacheFileContext context)
-      throws IOException {
     if (LOG.isDebugEnabled()) {
-      LOG.debug("Going to initialize distributed cache for " + context.user
-          + " with localizedBaseDir " + context.localizedBaseDir + 
-          " and uniqueString " + context.uniqueString);
+      LOG.debug("Output from LinuxTaskController's launchTask follows:");
+      logOutput(shExec.getOutput());
     }
-    List<String> args = new ArrayList<String>();
-    // Here, uniqueString might start with '-'. Adding -- in front of the 
-    // arguments indicates that they are non-option parameters.
-    args.add("--");
-    args.add(context.localizedBaseDir.toString());
-    args.add(context.uniqueString);
-    runCommand(TaskControllerCommands.INITIALIZE_DISTRIBUTEDCACHE_FILE, 
-        context.user, args, context.workDir, null);
+    return 0;
   }
 
   @Override
-  public void initializeUser(InitializationContext context)
-      throws IOException {
-    LOG.debug("Going to initialize user directories for " + context.user
-        + " on the TT");
-    runCommand(TaskControllerCommands.INITIALIZE_USER, context.user,
-        new ArrayList<String>(), context.workDir, null);
-  }
-
-  /**
-   * Builds the command line for launching/terminating/killing task JVM.
-   * Following is the format for launching/terminating/killing task JVM
-   * <br/>
-   * For launching following is command line argument:
-   * <br/>
-   * {@code user-name command tt-root job_id task_id} 
-   * <br/>
-   * For terminating/killing task jvm.
-   * {@code user-name command tt-root task-pid}
-   * 
-   * @param command command to be executed.
-   * @param userName user name
-   * @param cmdArgs list of extra arguments
-   * @param workDir working directory for the task-controller
-   * @param env JVM environment variables.
-   * @return {@link ShellCommandExecutor}
-   * @throws IOException
-   */
-  private ShellCommandExecutor buildTaskControllerExecutor(
-      TaskControllerCommands command, String userName, List<String> cmdArgs,
-      File workDir, Map<String, String> env)
-      throws IOException {
-    String[] taskControllerCmd = new String[3 + cmdArgs.size()];
-    taskControllerCmd[0] = getTaskControllerExecutablePath();
-    taskControllerCmd[1] = userName;
-    taskControllerCmd[2] = String.valueOf(command.ordinal());
-    int i = 3;
-    for (String cmdArg : cmdArgs) {
-      taskControllerCmd[i++] = cmdArg;
-    }
-    if (LOG.isDebugEnabled()) {
-      for (String cmd : taskControllerCmd) {
-        LOG.debug("taskctrl command = " + cmd);
-      }
-    }
-    ShellCommandExecutor shExec = null;
-    if(workDir != null && workDir.exists()) {
-      shExec = new ShellCommandExecutor(taskControllerCmd,
-          workDir, env);
-    } else {
-      shExec = new ShellCommandExecutor(taskControllerCmd);
-    }
-    
-    return shExec;
-  }
-  
-  // Return the task specific directory under the cache.
-  private String getTaskCacheDirectory(TaskControllerContext context) {
-    // In the case of JVM reuse, the task specific directory
-    // is different from what is set with respect with
-    // env.workDir. Hence building this from the taskId everytime.
-    String taskId = context.task.getTaskID().toString();
-    File cacheDirForJob = context.env.workDir.getParentFile().getParentFile();
-    if(context.task.isTaskCleanupTask()) {
-      taskId = taskId + TaskTracker.TASK_CLEANUP_SUFFIX;
-    }
-    return new File(cacheDirForJob, taskId).getAbsolutePath(); 
-  }
-  
-  // Write the JVM command line to a file under the specified directory
-  // Note that the JVM will be launched using a setuid executable, and
-  // could potentially contain strings defined by a user. Hence, to
-  // prevent special character attacks, we write the command line to
-  // a file and execute it.
-  private void writeCommand(String cmdLine, 
-                                      String directory) throws IOException {
-    
-    PrintWriter pw = null;
-    String commandFile = directory + File.separator + COMMAND_FILE;
-    LOG.info("Writing commands to " + commandFile);
-    try {
-      FileWriter fw = new FileWriter(commandFile);
-      BufferedWriter bw = new BufferedWriter(fw);
-      pw = new PrintWriter(bw);
-      pw.write(cmdLine);
-    } catch (IOException ioe) {
-      LOG.error("Caught IOException while writing JVM command line to file. "
-                + ioe.getMessage());
-    } finally {
-      if (pw != null) {
-        pw.close();
-      }
-      // set execute permissions for all on the file.
-      File f = new File(commandFile);
-      if (f.exists()) {
-        f.setReadable(true, false);
-        f.setExecutable(true, false);
-      }
-    }
+  public void deleteAsUser(String user, String subDir) throws IOException {
+    String[] command = 
+      new String[]{taskControllerExe, 
+                   user,
+                   Integer.toString(Commands.DELETE_AS_USER.getValue()),
+                   subDir};
+    ShellCommandExecutor shExec = new ShellCommandExecutor(command);
+    shExec.execute();
   }
 
-  protected String getTaskControllerExecutablePath() {
-    return taskControllerExe;
-  }  
-
-  private List<String> buildInitializeJobCommandArgs(
-      JobInitializationContext context) {
-    List<String> initJobCmdArgs = new ArrayList<String>();
-    initJobCmdArgs.add(context.jobid.toString());
-    return initJobCmdArgs;
+  @Override
+  public void deleteLogAsUser(String user, String subDir) throws IOException {
+    String[] command = 
+      new String[]{taskControllerExe, 
+                   user,
+                   Integer.toString(Commands.DELETE_LOG_AS_USER.getValue()),
+                   subDir};
+    ShellCommandExecutor shExec = new ShellCommandExecutor(command);
+    shExec.execute();
   }
 
   @Override
-  void initializeJob(JobInitializationContext context)
-      throws IOException {
-    LOG.debug("Going to initialize job " + context.jobid.toString()
-        + " on the TT");
-    runCommand(TaskControllerCommands.INITIALIZE_JOB, context.user,
-        buildInitializeJobCommandArgs(context), context.workDir, null);
-  }
-  
-  /**
-   * API which builds the command line to be pass to LinuxTaskController
-   * binary to terminate/kill the task. See 
-   * {@code buildTaskControllerExecutor(TaskControllerCommands, 
-   * String, List<String>, JvmEnv)} documentation.
-   * 
-   * 
-   * @param context context of task which has to be passed kill signal.
-   * 
-   */
-  private List<String> buildKillTaskCommandArgs(TaskControllerContext 
-      context){
-    List<String> killTaskJVMArgs = new ArrayList<String>();
-    killTaskJVMArgs.add(context.pid);
-    return killTaskJVMArgs;
-  }
-  
-  /**
-   * Convenience method used to sending appropriate Kill signal to the task 
-   * VM
-   * @param context
-   * @param command
-   * @throws IOException
-   */
-  private void finishTask(TaskControllerContext context,
-      TaskControllerCommands command) throws IOException{
-    if(context.task == null) {
-      LOG.info("Context task null not killing the JVM");
-      return;
-    }
-    ShellCommandExecutor shExec = buildTaskControllerExecutor(
-        command, context.env.conf.getUser(), 
-        buildKillTaskCommandArgs(context), context.env.workDir,
-        context.env.env);
+  public void signalTask(String user, int taskPid, 
+                         Signal signal) throws IOException {
+    String[] command = 
+      new String[]{taskControllerExe, 
+                   user,
+                   Integer.toString(Commands.SIGNAL_TASK.getValue()),
+                   Integer.toString(taskPid),
+                   Integer.toString(signal.getValue())};
+    ShellCommandExecutor shExec = new ShellCommandExecutor(command);
     try {
       shExec.execute();
-    } catch (Exception e) {
-      LOG.warn("Output from task-contoller is : " + shExec.getOutput());
-      throw new IOException(e);
-    }
-  }
-  
-  @Override
-  void terminateTask(TaskControllerContext context) {
-    try {
-      finishTask(context, TaskControllerCommands.TERMINATE_TASK_JVM);
-    } catch (Exception e) {
-      LOG.warn("Exception thrown while sending kill to the Task VM " + 
-          StringUtils.stringifyException(e));
-    }
-  }
-  
-  @Override
-  void killTask(TaskControllerContext context) {
-    try {
-      finishTask(context, TaskControllerCommands.KILL_TASK_JVM);
-    } catch (Exception e) {
-      LOG.warn("Exception thrown while sending destroy to the Task VM " + 
-          StringUtils.stringifyException(e));
+    } catch (ExitCodeException e) {
+      int ret_code = shExec.getExitCode();
+      if (ret_code != ResultCode.INVALID_TASK_PID.getValue()) {
+        logOutput(shExec.getOutput());
+        throw new IOException("Problem signalling task " + taskPid + " with " +
+                              signal + "; exit = " + ret_code);
+      }
     }
   }
 }

+ 18 - 10
src/mapred/org/apache/hadoop/mapred/LocalJobRunner.java

@@ -33,7 +33,6 @@ import org.apache.hadoop.filecache.DistributedCache;
 import org.apache.hadoop.filecache.TaskDistributedCacheManager;
 import org.apache.hadoop.filecache.TrackerDistributedCacheManager;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.DataOutputBuffer;
@@ -59,6 +58,7 @@ class LocalJobRunner implements JobSubmissionProtocol {
   private int map_tasks = 0;
   private int reduce_tasks = 0;
   final Random rand = new Random();
+  private final TaskController taskController = new DefaultTaskController();
 
   private JobTrackerInstrumentation myMetrics = null;
 
@@ -89,7 +89,7 @@ class LocalJobRunner implements JobSubmissionProtocol {
     private FileSystem localFs;
     boolean killed = false;
     
-    private TrackerDistributedCacheManager trackerDistributerdCacheManager;
+    private TrackerDistributedCacheManager trackerDistributedCacheManager;
     private TaskDistributedCacheManager taskDistributedCacheManager;
     
     // Counters summed over all the map/reduce tasks which
@@ -115,14 +115,13 @@ class LocalJobRunner implements JobSubmissionProtocol {
 
       // Manage the distributed cache.  If there are files to be copied,
       // this will trigger localFile to be re-written again.
-      this.trackerDistributerdCacheManager =
-        new TrackerDistributedCacheManager(conf, new DefaultTaskController());
+      this.trackerDistributedCacheManager =
+        new TrackerDistributedCacheManager(conf, taskController);
       this.taskDistributedCacheManager =
-        trackerDistributerdCacheManager.newTaskDistributedCacheManager(conf);
-      taskDistributedCacheManager.setup(
-          new LocalDirAllocator("mapred.local.dir"),
-          new File(systemJobDir.toString()),
-          "archive", "archive");
+        trackerDistributedCacheManager.newTaskDistributedCacheManager(jobid, conf);
+      taskDistributedCacheManager.setupCache(
+          "archive",
+          "archive");
       
       if (DistributedCache.getSymlink(conf)) {
         // This is not supported largely because,
@@ -302,7 +301,7 @@ class LocalJobRunner implements JobSubmissionProtocol {
           localFs.delete(localJobFile, true);              // delete local copy
           // Cleanup distributed cache
           taskDistributedCacheManager.release();
-          trackerDistributerdCacheManager.purgeCache();
+          trackerDistributedCacheManager.purgeCache();
         } catch (IOException e) {
           LOG.warn("Error cleaning up "+id+": "+e);
         }
@@ -395,6 +394,14 @@ class LocalJobRunner implements JobSubmissionProtocol {
       return new MapTaskCompletionEventsUpdate(TaskCompletionEvent.EMPTY_ARRAY,
                                                false);
     }
+
+    @Override
+    public void updatePrivateDistributedCacheSizes(
+        org.apache.hadoop.mapreduce.JobID jobId, long[] sizes)
+        throws IOException {
+      trackerDistributedCacheManager.setArchiveSizes(jobId, sizes);
+    }
     
   }
 
@@ -402,6 +409,7 @@ class LocalJobRunner implements JobSubmissionProtocol {
     this.fs = FileSystem.getLocal(conf);
     this.conf = conf;
     myMetrics = JobTrackerInstrumentation.create(null, new JobConf(conf));
+    taskController.setConf(conf);
   }
 
   // JobSubmissionProtocol methods

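The new umbilical method lets the child report the on-disk sizes of the
private distributed-cache files it localized, so the tracker-side cache
manager can account for them. A hedged caller-side sketch; the job id and
sizes are illustrative values only:

    // Illustrative only: a real child would measure the localized files.
    org.apache.hadoop.mapreduce.JobID jobId =
        org.apache.hadoop.mapreduce.JobID.forName("job_201009160001_0001");
    long[] sizes = { 4096L, 1048576L };    // one entry per cache file
    umbilical.updatePrivateDistributedCacheSizes(jobId, sizes);
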
+ 4 - 2
src/mapred/org/apache/hadoop/mapred/MapTask.java

@@ -128,8 +128,10 @@ class MapTask extends Task {
   
   @Override
   public TaskRunner createRunner(TaskTracker tracker, 
-      TaskTracker.TaskInProgress tip) {
-    return new MapTaskRunner(tip, tracker, this.conf);
+                                 TaskTracker.TaskInProgress tip,
+                                 TaskTracker.RunningJob rjob
+                                 ) throws IOException {
+    return new MapTaskRunner(tip, tracker, this.conf, rjob);
   }
 
   @Override

+ 4 - 2
src/mapred/org/apache/hadoop/mapred/MapTaskRunner.java

@@ -24,8 +24,10 @@ import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
 /** Runs a map task. */
 class MapTaskRunner extends TaskRunner {
   
-  public MapTaskRunner(TaskInProgress task, TaskTracker tracker, JobConf conf) {
-    super(task, tracker, conf);
+  public MapTaskRunner(TaskInProgress task, TaskTracker tracker, JobConf conf,
+                       TaskTracker.RunningJob rjob) 
+  throws IOException {
+    super(task, tracker, conf, rjob);
   }
   
   /** Delete any temporary files from previous failed attempts. */

+ 4 - 3
src/mapred/org/apache/hadoop/mapred/ReduceTask.java

@@ -172,9 +172,10 @@ class ReduceTask extends Task {
   }
 
   @Override
-  public TaskRunner createRunner(TaskTracker tracker, TaskInProgress tip) 
-  throws IOException {
-    return new ReduceTaskRunner(tip, tracker, this.conf);
+  public TaskRunner createRunner(TaskTracker tracker, TaskInProgress tip,
+                                 TaskTracker.RunningJob rjob
+                                 ) throws IOException {
+    return new ReduceTaskRunner(tip, tracker, this.conf, rjob);
   }
 
   @Override

+ 3 - 2
src/mapred/org/apache/hadoop/mapred/ReduceTaskRunner.java

@@ -25,9 +25,10 @@ import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
 class ReduceTaskRunner extends TaskRunner {
 
   public ReduceTaskRunner(TaskInProgress task, TaskTracker tracker, 
-                          JobConf conf) throws IOException {
+                          JobConf conf, TaskTracker.RunningJob rjob
+                          ) throws IOException {
     
-    super(task, tracker, conf);
+    super(task, tracker, conf, rjob);
   }
 
   /** Assemble all of the map output files */

+ 3 - 1
src/mapred/org/apache/hadoop/mapred/Task.java

@@ -451,7 +451,9 @@ abstract public class Task implements Writable, Configurable {
   /** Return an appropriate thread runner for this task. 
    * @param tip the TaskInProgress within which this task runs */
   public abstract TaskRunner createRunner(TaskTracker tracker, 
-      TaskTracker.TaskInProgress tip) throws IOException;
+                                          TaskTracker.TaskInProgress tip, 
+                                          TaskTracker.RunningJob rjob
+                                          ) throws IOException;
 
   /** The number of milliseconds between progress reports. */
   public static final int PROGRESS_INTERVAL = 3000;

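Threading TaskTracker.RunningJob through createRunner lets each runner share
the job's distributed-cache state instead of building its own. A hedged
caller-side sketch of the new signature; the variable names are illustrative:

    // The tracker resolves the task's RunningJob and hands it to the runner.
    TaskRunner runner = task.createRunner(taskTracker, tip, rjob);
    runner.start();   // TaskRunner extends Thread
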
+ 120 - 322
src/mapred/org/apache/hadoop/mapred/TaskController.java

@@ -17,19 +17,24 @@
 */
 package org.apache.hadoop.mapred;
 
+import java.io.BufferedWriter;
 import java.io.File;
+import java.io.FileWriter;
 import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext;
-import org.apache.hadoop.mapred.JvmManager.JvmEnv;
-import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
-import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.util.ProcessTree.Signal;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 
 /**
@@ -48,360 +53,153 @@ import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 public abstract class TaskController implements Configurable {
   
   private Configuration conf;
-  
+
   public static final Log LOG = LogFactory.getLog(TaskController.class);
   
+  // Name of the executable script that will contain the child
+  // JVM command line. See writeCommand for details.
+  protected static final String COMMAND_FILE = "taskjvm.sh";
+  
+  protected LocalDirAllocator allocator;
+
+  public static final FsPermission TASK_LAUNCH_SCRIPT_PERMISSION =
+    FsPermission.createImmutable((short) 0700); // rwx------
+  
   public Configuration getConf() {
     return conf;
   }
 
-  // The list of directory paths specified in the variable mapred.local.dir.
-  // This is used to determine which among the list of directories is picked up
-  // for storing data for a particular task.
-  protected String[] mapredLocalDirs;
-
   public void setConf(Configuration conf) {
     this.conf = conf;
-    mapredLocalDirs = conf.getStrings("mapred.local.dir");
   }
 
   /**
-   * Sets up the permissions of the following directories on all the configured
-   * disks:
-   * <ul>
-   * <li>mapred-local directories</li>
-   * <li>Hadoop log directories</li>
-   * </ul>
+   * Does initialization and setup.
+   * @param allocator the local dir allocator to use
    */
-  public void setup() throws IOException {
-    for (String localDir : this.mapredLocalDirs) {
-      // Set up the mapred-local directories.
-      File mapredlocalDir = new File(localDir);
-      if (!mapredlocalDir.exists() && !mapredlocalDir.mkdirs()) {
-        LOG.warn("Unable to create mapred-local directory : "
-            + mapredlocalDir.getPath());
-      } else {
-        Localizer.PermissionsHandler.setPermissions(mapredlocalDir,
-            Localizer.PermissionsHandler.sevenFiveFive);
-      }
-    }
-
-    // Set up the user log directory
-    File taskLog = TaskLog.getUserLogDir();
-    if (!taskLog.exists() && !taskLog.mkdirs()) {
-      LOG.warn("Unable to create taskLog directory : " + taskLog.getPath());
-    } else {
-      Localizer.PermissionsHandler.setPermissions(taskLog,
-          Localizer.PermissionsHandler.sevenFiveFive);
-    }
-  }
-
+  public abstract void setup(LocalDirAllocator allocator) throws IOException;
+  
   /**
-   * Take task-controller specific actions to initialize job. This involves
-   * setting appropriate permissions to job-files so as to secure the files to
-   * be accessible only by the user's tasks.
-   * 
+   * Creates all of the directories necessary for the job to start and
+   * downloads the job and private distributed cache files.
+   * Creates both the user directories and the job log directory.
+   * @param user the user name
+   * @param jobid the id of the job
+   * @param credentials a filename containing the job secrets
+   * @param jobConf the path to the localized configuration file
+   * @param taskTracker the connection to the task tracker
    * @throws IOException
    */
-  abstract void initializeJob(JobInitializationContext context) throws IOException;
-
+  public abstract void initializeJob(String user, String jobid, 
+                                     Path credentials, Path jobConf,
+                                     TaskUmbilicalProtocol taskTracker) 
+  throws IOException;
+  
   /**
-   * Take task-controller specific actions to initialize the distributed cache
-   * file. This involves setting appropriate permissions for these files so as
-   * to secure them to be accessible only their owners.
-   * 
-   * @param context
+   * Creates all of the directories for the task and launches the child JVM.
+   * @param user the user name
+   * @param jobId the jobId in question
+   * @param attemptId the attempt id (cleanup attempts have .cleanup suffix)
+   * @param setup list of shell commands to execute before the JVM starts
+   * @param jvmArguments list of jvm arguments
+   * @param currentWorkDirectory the full path of the cwd for the task
+   * @param stdout the file to redirect stdout to
+   * @param stderr the file to redirect stderr to
+   * @return the exit code for the task
    * @throws IOException
    */
-  public abstract void initializeDistributedCacheFile(DistributedCacheFileContext context)
-      throws IOException;
-
-  /**
-   * Launch a task JVM
-   * 
-   * This method defines how a JVM will be launched to run a task. Each
-   * task-controller should also do an
-   * {@link #initializeTask(TaskControllerContext)} inside this method so as to
-   * initialize the task before launching it. This is for reasons of
-   * task-controller specific optimizations w.r.t combining initialization and
-   * launching of tasks.
-   * 
-   * @param context the context associated to the task
-   */
-  abstract void launchTaskJVM(TaskControllerContext context)
-                                      throws IOException;
-
+  public abstract
+  int launchTask(String user, 
+                 String jobId,
+                 String attemptId,
+                 List<String> setup,
+                 List<String> jvmArguments,
+                 File currentWorkDirectory,
+                 String stdout,
+                 String stderr) throws IOException;
+  
   /**
-   * Top level cleanup a task JVM method.
-   *
-   * The current implementation does the following.
-   * <ol>
-   * <li>Sends a graceful terminate signal to task JVM allowing its sub-process
-   * to cleanup.</li>
-   * <li>Waits for stipulated period</li>
-   * <li>Sends a forceful kill signal to task JVM, terminating all its
-   * sub-process forcefully.</li>
-   * </ol>
-   * 
-   * @param context the task for which kill signal has to be sent.
+   * Send a signal to a task pid as the user.
+   * @param user the user name
+   * @param taskPid the pid of the task
+   * @param signal the signal to send
    */
-  final void destroyTaskJVM(TaskControllerContext context) {
-    terminateTask(context);
-    try {
-      Thread.sleep(context.sleeptimeBeforeSigkill);
-    } catch (InterruptedException e) {
-      LOG.warn("Sleep interrupted : " + 
-          StringUtils.stringifyException(e));
-    }
-    killTask(context);
-  }
-
-  /** Perform initializing actions required before a task can run.
-    * 
-    * For instance, this method can be used to setup appropriate
-    * access permissions for files and directories that will be
-    * used by tasks. Tasks use the job cache, log, and distributed cache
-    * directories and files as part of their functioning. Typically,
-    * these files are shared between the daemon and the tasks
-    * themselves. So, a TaskController that is launching tasks
-    * as different users can implement this method to setup
-    * appropriate ownership and permissions for these directories
-    * and files.
-    */
-  abstract void initializeTask(TaskControllerContext context)
-      throws IOException;
-
+  public abstract void signalTask(String user, int taskPid, 
+                                  Signal signal) throws IOException;
+  
   /**
-   * Contains task information required for the task controller.  
+   * Delete the user's files under all of the task tracker root directories.
+   * @param user the user name
+   * @param subDir the path relative to the user's subdirectory under
+   *        the task tracker root directories.
+   * @throws IOException
    */
-  static class TaskControllerContext {
-    // task being executed
-    Task task;
-    ShellCommandExecutor shExec;     // the Shell executor executing the JVM for this task.
-
-    // Information used only when this context is used for launching new tasks.
-    JvmEnv env;     // the JVM environment for the task.
-
-    // Information used only when this context is used for destroying a task jvm.
-    String pid; // process handle of task JVM.
-    long sleeptimeBeforeSigkill; // waiting time before sending SIGKILL to task JVM after sending SIGTERM
-  }
-
+  public abstract void deleteAsUser(String user, 
+                                    String subDir) throws IOException;
+  
   /**
-   * Contains info related to the path of the file/dir to be deleted. This info
-   * is needed by task-controller to build the full path of the file/dir
+   * Delete the user's files under the userlogs directory.
+   * @param user the user to work as
+   * @param subDir the path under the userlogs directory.
+   * @throws IOException
    */
-  static abstract class TaskControllerPathDeletionContext 
-  extends PathDeletionContext {
-    TaskController taskController;
-    String user;
-
-    /**
-     * mapredLocalDir is the base dir under which to-be-deleted jobLocalDir, 
-     * taskWorkDir or taskAttemptDir exists. fullPath of jobLocalDir, 
-     * taskAttemptDir or taskWorkDir is built using mapredLocalDir, jobId, 
-     * taskId, etc.
-     */
-    Path mapredLocalDir;
-
-    public TaskControllerPathDeletionContext(FileSystem fs, Path mapredLocalDir,
-                                             TaskController taskController,
-                                             String user) {
-      super(fs, null);
-      this.taskController = taskController;
-      this.mapredLocalDir = mapredLocalDir;
+  public abstract void deleteLogAsUser(String user, 
+                                       String subDir) throws IOException;
+  
+  static class DeletionContext extends CleanupQueue.PathDeletionContext {
+    private TaskController controller;
+    private boolean isLog;
+    private String user;
+    private String subDir;
+    DeletionContext(TaskController controller, boolean isLog, String user, 
+                    String subDir) {
+      super(null, null);
+      this.controller = controller;
+      this.isLog = isLog;
       this.user = user;
-    }
-
-    @Override
-    protected String getPathForCleanup() {
-      if (fullPath == null) {
-        fullPath = buildPathForDeletion();
-      }
-      return fullPath;
-    }
-
-    /**
-     * Return the component of the path under the {@link #mapredLocalDir} to be 
-     * cleaned up. Its the responsibility of the class that extends 
-     * {@link TaskControllerPathDeletionContext} to provide the correct 
-     * component. For example 
-     *  - For task related cleanups, either the task-work-dir or task-local-dir
-     *    might be returned depending on jvm reuse.
-     *  - For job related cleanup, simply the job-local-dir might be returned.
-     */
-    abstract protected String getPath();
-    
-    /**
-     * Builds the path of taskAttemptDir OR taskWorkDir based on
-     * mapredLocalDir, jobId, taskId, etc
-     */
-    String buildPathForDeletion() {
-      return mapredLocalDir.toUri().getPath() + Path.SEPARATOR + getPath();
-    }
-  }
-
-  /** Contains info related to the path of the file/dir to be deleted. This info
-   * is needed by task-controller to build the full path of the task-work-dir or
-   * task-local-dir depending on whether the jvm is reused or not.
-   */
-  static class TaskControllerTaskPathDeletionContext 
-  extends TaskControllerPathDeletionContext {
-    final Task task;
-    final boolean isWorkDir;
-    
-    public TaskControllerTaskPathDeletionContext(FileSystem fs, 
-        Path mapredLocalDir, Task task, boolean isWorkDir, 
-        TaskController taskController) {
-      super(fs, mapredLocalDir, taskController, task.getUser());
-      this.task = task;
-      this.isWorkDir = isWorkDir;
+      this.subDir = subDir;
     }
     
-    /**
-     * Returns the taskWorkDir or taskLocalDir based on whether 
-     * {@link TaskControllerTaskPathDeletionContext} is configured to delete
-     * the workDir.
-     */
-    @Override
-    protected String getPath() {
-      String subDir = (isWorkDir) ? TaskTracker.getTaskWorkDir(task.getUser(),
-          task.getJobID().toString(), task.getTaskID().toString(),
-          task.isTaskCleanupTask())
-        : TaskTracker.getLocalTaskDir(task.getUser(),
-          task.getJobID().toString(), task.getTaskID().toString(),
-          task.isTaskCleanupTask());
-      return subDir;
-    }
-
-    /**
-     * Makes the path(and its subdirectories recursively) fully deletable by
-     * setting proper permissions(770) by task-controller
-     */
     @Override
-    protected void enablePathForCleanup() throws IOException {
-      getPathForCleanup();// allow init of fullPath, if not inited already
-      if (fs.exists(new Path(fullPath))) {
-        taskController.enableTaskForCleanup(this);
+    protected void deletePath() throws IOException {
+      if (isLog) {
+        controller.deleteLogAsUser(user, subDir);
+      } else {
+        controller.deleteAsUser(user, subDir);
       }
     }
   }
 
-  /** Contains info related to the path of the file/dir to be deleted. This info
-   * is needed by task-controller to build the full path of the job-local-dir.
-   */
-  static class TaskControllerJobPathDeletionContext 
-  extends TaskControllerPathDeletionContext {
-    final JobID jobId;
-    
-    public TaskControllerJobPathDeletionContext(FileSystem fs, 
-        Path mapredLocalDir, JobID id, String user, 
-        TaskController taskController) {
-      super(fs, mapredLocalDir, taskController, user);
-      this.jobId = id;
-    }
-    
-    /**
-     * Returns the jobLocalDir of the job to be cleaned up.
-     */
-    @Override
-    protected String getPath() {
-      return TaskTracker.getLocalJobDir(user, jobId.toString());
-    }
-    
-    /**
-     * Makes the path(and its sub-directories recursively) fully deletable by
-     * setting proper permissions(770) by task-controller
-     */
-    @Override
-    protected void enablePathForCleanup() throws IOException {
-      getPathForCleanup();// allow init of fullPath, if not inited already
-      if (fs.exists(new Path(fullPath))) {
-        taskController.enableJobForCleanup(this);
+  // Write the JVM command line to a file under the specified directory.
+  // Note that the JVM will be launched using a setuid executable and that
+  // the command line may contain strings defined by a user. Hence, to
+  // prevent special-character attacks, we write the command line to a
+  // file and execute that file rather than passing it through a shell.
+  protected static String writeCommand(String cmdLine, FileSystem fs,
+      Path commandFile) throws IOException {
+    PrintWriter pw = null;
+    LOG.info("Writing commands to " + commandFile);
+    try {
+      pw = new PrintWriter(FileSystem.create(
+            fs, commandFile, TASK_LAUNCH_SCRIPT_PERMISSION));
+      pw.write(cmdLine);
+    } catch (IOException ioe) {
+      LOG.error("Caught IOException while writing JVM command line to file. ",
+          ioe);
+    } finally {
+      if (pw != null) {
+        pw.close();
       }
     }
+    return commandFile.makeQualified(fs).toUri().getPath();
   }
   
-  /**
-   * NOTE: This class is internal only class and not intended for users!!
-   * 
-   */
-  public static class InitializationContext {
-    public File workDir;
-    public String user;
-    
-    public InitializationContext() {
-    }
-    
-    public InitializationContext(String user, File workDir) {
-      this.user = user;
-      this.workDir = workDir;
-    }
-  }
-  
-  /**
-   * This is used for initializing the private localized files in distributed
-   * cache. Initialization would involve changing permission, ownership and etc.
-   */
-  public static class DistributedCacheFileContext extends InitializationContext {
-    // base directory under which file has been localized
-    Path localizedBaseDir;
-    // the unique string used to construct the localized path
-    String uniqueString;
-
-    public DistributedCacheFileContext(String user, File workDir,
-        Path localizedBaseDir, String uniqueString) {
-      super(user, workDir);
-      this.localizedBaseDir = localizedBaseDir;
-      this.uniqueString = uniqueString;
-    }
-
-    public Path getLocalizedUniqueDir() {
-      return new Path(localizedBaseDir, new Path(TaskTracker
-          .getPrivateDistributedCacheDir(user), uniqueString));
+  protected void logOutput(String output) {
+    if (output != null) {
+      for (String str : output.split("\n")) {
+        LOG.info(str);
+      }
+    }
   }
-
-  static class JobInitializationContext extends InitializationContext {
-    JobID jobid;
-  }
-
-  /**
-   * Sends a graceful terminate signal to taskJVM and it sub-processes. 
-   *   
-   * @param context task context
-   */
-  abstract void terminateTask(TaskControllerContext context);
-  
-  /**
-   * Enable the task for cleanup by changing permissions of the path
-   * @param context   path deletion context
-   * @throws IOException
-   */
-  abstract void enableTaskForCleanup(PathDeletionContext context)
-      throws IOException;
-  /**
-   * Sends a KILL signal to forcefully terminate the taskJVM and its
-   * sub-processes.
-   * 
-   * @param context task context
-   */
-  abstract void killTask(TaskControllerContext context);
-
-  /**
-   * Initialize user on this TaskTracer in a TaskController specific manner.
-   * 
-   * @param context
-   * @throws IOException
-   */
-  public abstract void initializeUser(InitializationContext context)
-      throws IOException;
-  
-  /**
-   * Enable the job for cleanup by changing permissions of the path
-   * @param context   path deletion context
-   * @throws IOException
-   */
-  abstract void enableJobForCleanup(PathDeletionContext context)
-    throws IOException;
 }

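writeCommand is the heart of the new launch path: the child JVM command
line, which may contain user-supplied strings, is written to taskjvm.sh with
0700 permissions, and only the script path reaches the setuid launcher. A
minimal sketch of how an implementation might call it; the private-directory
argument is an assumption for illustration:

    // Hedged sketch: persist the command line, return the script to execute.
    String prepareLaunchScript(FileSystem localFs, Path taskPrivateDir,
                               String cmdLine) throws IOException {
      Path script = new Path(taskPrivateDir, COMMAND_FILE);  // "taskjvm.sh"
      return writeCommand(cmdLine, localFs, script);         // mode 0700
    }
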
+ 33 - 9
src/mapred/org/apache/hadoop/mapred/TaskLog.java

@@ -44,6 +44,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.util.ProcessTree;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Appender;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
@@ -158,7 +159,14 @@ public class TaskLog {
 
   static File getAttemptDir(TaskAttemptID taskid, boolean isCleanup) {
     String cleanupSuffix = isCleanup ? ".cleanup" : "";
-    return new File(getJobDir(taskid.getJobID()), taskid + cleanupSuffix);
+    return getAttemptDir(taskid.getJobID().toString(), 
+        taskid.toString() + cleanupSuffix);
+  }
+  
+  static File getAttemptDir(String jobid, String taskid) {
+    // taskid should be fully formed, including the optional ".cleanup"
+    // suffix for cleanup attempts
+    return new File(getJobDir(jobid), taskid);
   }
 
   static final List<LogName> LOGS_TRACKED_BY_INDEX_FILES =
@@ -232,6 +240,9 @@ public class TaskLog {
         }
       }
     }
+    if (currentTaskid == null) {
+      currentTaskid = taskid;
+    }
     // set start and end
     for (LogName logName : LOGS_TRACKED_BY_INDEX_FILES) {
       if (currentTaskid != taskid) {
@@ -493,7 +504,8 @@ public class TaskLog {
     List<String> result = new ArrayList<String>(3);
     result.add(bashCommand);
     result.add("-c");
-    String mergedCmd = buildCommandLine(setup, cmd,
+    String mergedCmd = buildCommandLine(setup,
+        cmd,
         stdoutFilename,
         stderrFilename, tailLength,
         useSetsid);
@@ -511,15 +523,17 @@ public class TaskLog {
     
     String stdout = FileUtil.makeShellPath(stdoutFilename);
     String stderr = FileUtil.makeShellPath(stderrFilename);
-    StringBuffer mergedCmd = new StringBuffer();
+    StringBuilder mergedCmd = new StringBuilder();
     
     if (!Shell.WINDOWS) {
-      mergedCmd.append(" export JVM_PID=`echo $$` ; ");
+      mergedCmd.append("export JVM_PID=`echo $$`\n");
     }
 
-    if (setup != null && setup.size() > 0) {
-      mergedCmd.append(addCommand(setup, false));
-      mergedCmd.append(";");
+    if (setup != null) {
+      for (String s : setup) {
+        mergedCmd.append(s);
+        mergedCmd.append("\n");
+      }
     }
     if (tailLength > 0) {
       mergedCmd.append("(");
@@ -627,11 +641,21 @@ public class TaskLog {
   /**
    * Get the user log directory for the job jobid.
    * 
-   * @param jobid
+   * @param jobid string representation of the jobid
+   * @return user log directory for the job
+   */
+  public static File getJobDir(String jobid) {
+    return new File(getUserLogDir(), jobid);
+  }
+  
+  /**
+   * Get the user log directory for the job jobid.
+   * 
+   * @param jobid the jobid object
    * @return user log directory for the job
    */
   public static File getJobDir(JobID jobid) {
-    return new File(getUserLogDir(), jobid.toString());
+    return getJobDir(jobid.toString());
   }
 
 } // TaskLog

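The new string-based getAttemptDir overload does no parsing: callers must
pass a fully formed attempt id, with the ".cleanup" suffix already appended
for cleanup attempts. A hedged sketch with illustrative ids:

    // Illustrative ids; note the caller-supplied ".cleanup" suffix.
    File attemptLogDir = TaskLog.getAttemptDir(
        "job_201009160001_0042",
        "attempt_201009160001_0042_m_000003_0.cleanup");
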
+ 3 - 12
src/mapred/org/apache/hadoop/mapred/TaskMemoryManagerThread.java

@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.mapred;
 
-import java.io.File;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.Map;
@@ -184,13 +183,8 @@ class TaskMemoryManagerThread extends Thread {
               // itself is still retained in runningTasks till successful
               // transmission to JT
 
-              // create process tree object
-              long sleeptimeBeforeSigkill = taskTracker.getJobConf().getLong(
-                  "mapred.tasktracker.tasks.sleeptime-before-sigkill",
-                  ProcessTree.DEFAULT_SLEEPTIME_BEFORE_SIGKILL);
-              
-              ProcfsBasedProcessTree pt = new ProcfsBasedProcessTree(
-                  pId,ProcessTree.isSetsidAvailable, sleeptimeBeforeSigkill);
+              ProcfsBasedProcessTree pt = 
+                new ProcfsBasedProcessTree(pId, ProcessTree.isSetsidAvailable);
               LOG.debug("Tracking ProcessTree " + pId + " for the first time");
 
               ptInfo.setPid(pId);
@@ -228,10 +222,9 @@ class TaskMemoryManagerThread extends Thread {
                     + "bytes. Killing task. \nDump of the process-tree for "
                     + tid + " : \n" + pTree.getProcessTreeDump();
             LOG.warn(msg);
+            // kill the task
             taskTracker.cleanUpOverMemoryTask(tid, true, msg);
 
-            // Now destroy the ProcessTree, remove it from monitoring map.
-            pTree.destroy(true/*in the background*/);
             it.remove();
             LOG.info("Removed ProcessTree with root " + pId);
           } else {
@@ -365,8 +358,6 @@ class TaskMemoryManagerThread extends Thread {
         taskTracker.cleanUpOverMemoryTask(tid, false, msg);
         // Now destroy the ProcessTree, remove it from monitoring map.
         ProcessTreeInfo ptInfo = processTreeInfoMap.get(tid);
-        ProcfsBasedProcessTree pTree = ptInfo.getProcessTree();
-        pTree.destroy(true/*in the background*/);
         processTreeInfoMap.remove(tid);
         LOG.info("Removed ProcessTree with root " + ptInfo.getPID());
       }

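The memory manager now only measures process trees; killing is delegated to
cleanUpOverMemoryTask, so the sleeptime-before-sigkill knob and the destroy
calls are gone. A hedged monitoring sketch using the new two-argument
constructor; getProcessTree and getCumulativeVmem are assumed from the
existing ProcfsBasedProcessTree API:

    // Measure a task's process tree without destroying it.
    ProcfsBasedProcessTree pt =
        new ProcfsBasedProcessTree(pid, ProcessTree.isSetsidAvailable);
    pt = pt.getProcessTree();                 // refresh from /proc
    long vmemBytes = pt.getCumulativeVmem();  // total virtual memory used
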
+ 86 - 87
src/mapred/org/apache/hadoop/mapred/TaskRunner.java

@@ -20,16 +20,16 @@ package org.apache.hadoop.mapred;
 import java.io.ByteArrayOutputStream;
 import java.io.File;
 import java.io.IOException;
-import java.io.OutputStream;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URI;
-import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Random;
 import java.util.Vector;
+import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -46,7 +46,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.mapreduce.JobContext;
 
 /** Base class that runs a task in a separate process.  Tasks are run in a
  * separate process in order to isolate the map/reduce system code from bugs in
@@ -74,6 +73,8 @@ abstract class TaskRunner extends Thread {
 
   static final String DEFAULT_HOME_DIR= "/homes/";
   
+  static final String HADOOP_WORK_DIR = "HADOOP_WORK_DIR";
+  
   static final String MAPRED_ADMIN_USER_ENV =
     "mapreduce.admin.user.env";
 
@@ -88,13 +89,19 @@ abstract class TaskRunner extends Thread {
   private int exitCode = -1;
   private boolean exitCodeSet = false;
   
-  private static String SYSTEM_PATH_SEPARATOR = System.getProperty("path.separator");
+  private static String SYSTEM_PATH_SEPARATOR = 
+    System.getProperty("path.separator");
   static final String MAPREDUCE_USER_CLASSPATH_FIRST =
         "mapreduce.user.classpath.first"; //a semi-hidden config
 
   
   private TaskTracker tracker;
-  private TaskDistributedCacheManager taskDistributedCacheManager;
+  private final TaskDistributedCacheManager taskDistributedCacheManager;
+  private String[] localdirs;
+  private static final Random rand = new Random();
 
   protected JobConf conf;
   JvmManager jvmManager;
@@ -105,7 +112,8 @@ abstract class TaskRunner extends Thread {
   protected MapOutputFile mapOutputFile;
 
   public TaskRunner(TaskTracker.TaskInProgress tip, TaskTracker tracker, 
-      JobConf conf) {
+                    JobConf conf, TaskTracker.RunningJob rjob
+                    ) throws IOException {
     this.tip = tip;
     this.t = tip.getTask();
     this.tracker = tracker;
@@ -113,6 +121,8 @@ abstract class TaskRunner extends Thread {
     this.mapOutputFile = new MapOutputFile();
     this.mapOutputFile.setConf(conf);
     this.jvmManager = tracker.getJvmManagerInstance();
+    this.localdirs = conf.getLocalDirs();
+    taskDistributedCacheManager = rjob.distCacheMgr;
   }
 
   public Task getTask() { return t; }
@@ -182,27 +192,20 @@ abstract class TaskRunner extends Thread {
       //all the archives
       TaskAttemptID taskid = t.getTaskID();
       final LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
-      final File workDir = formWorkDir(lDirAlloc, taskid, t.isTaskCleanupTask(), conf);
+      //simply get the location of the workDir and pass it to the child. The
+      //child will do the actual dir creation
+      final File workDir =
+        new File(new Path(localdirs[rand.nextInt(localdirs.length)], 
+            TaskTracker.getTaskWorkDir(t.getUser(),
+                taskid.getJobID().toString(), taskid.toString(),
+                t.isTaskCleanupTask())).toString());
       
-      // We don't create any symlinks yet, so presence/absence of workDir
-      // actually on the file system doesn't matter.
       String user = tip.getUGI().getUserName();
-      tip.getUGI().doAs(new PrivilegedExceptionAction<Void>() {
-        public Void run() throws IOException {
-          taskDistributedCacheManager =
-            tracker.getTrackerDistributedCacheManager()
-            .newTaskDistributedCacheManager(conf);
-          taskDistributedCacheManager.setup(lDirAlloc, workDir, TaskTracker
-              .getPrivateDistributedCacheDir(conf.getUser()),
-                   TaskTracker.getPublicDistributedCacheDir());
-          return null;
-        }
-      });
       
       // Set up the child task's configuration. After this call, no localization
       // of files should happen in the TaskTracker's process space. Any changes to
       // the conf object after this will NOT be reflected to the child.
-      setupChildTaskConfiguration(lDirAlloc);
+      // setupChildTaskConfiguration is now performed by the child itself,
+      // running as the user:
+      // setupChildTaskConfiguration(lDirAlloc);
 
       if (!prepare()) {
         return;
@@ -220,7 +223,7 @@ abstract class TaskRunner extends Thread {
       tracker.addToMemoryManager(t.getTaskID(), t.isMapTask(), conf);
 
       // set memory limit using ulimit if feasible and necessary ...
-      List<String> setup = getVMSetupCmd();
+      String setup = getVMSetupCmd();
       // Set up the redirection of the task's stdout and stderr streams
       File[] logFiles = prepareLogFiles(taskid, t.isTaskCleanupTask());
       File stdout = logFiles[0];
@@ -231,8 +234,21 @@ abstract class TaskRunner extends Thread {
       Map<String, String> env = new HashMap<String, String>();
       errorInfo = getVMEnvironment(errorInfo, user, workDir, conf, env, taskid,
                                    logSize);
-
-      launchJvmAndWait(setup, vargs, stdout, stderr, logSize, workDir, env);
+      
+      // flatten the env as a set of export commands
+      List <String> setupCmds = new ArrayList<String>();
+      for (Entry<String, String> entry : env.entrySet()) {
+        StringBuilder sb = new StringBuilder();
+        sb.append("export ");
+        sb.append(entry.getKey());
+        sb.append("=\"");
+        sb.append(entry.getValue());
+        sb.append("\"");
+        setupCmds.add(sb.toString());
+      }
+      setupCmds.add(setup);
+      
+      launchJvmAndWait(setupCmds, vargs, stdout, stderr, logSize, workDir);
       tracker.getTaskTrackerInstrumentation().reportTaskEnd(t.getTaskID());
       if (exitCodeSet) {
         if (!killed && exitCode != 0) {
@@ -261,13 +277,6 @@ abstract class TaskRunner extends Thread {
         LOG.warn(t.getTaskID()+" Reporting Diagnostics", e);
       }
     } finally {
-      try{
-        if (taskDistributedCacheManager != null) {
-          taskDistributedCacheManager.release();
-        }
-      }catch(IOException ie){
-        LOG.warn("Error releasing caches : Cache files might not have been cleaned up");
-      }
       
       // It is safe to call TaskTracker.TaskInProgress.reportTaskFinished with
       // *false* since the task has either
@@ -277,11 +286,11 @@ abstract class TaskRunner extends Thread {
     }
   }
 
-  void launchJvmAndWait(List<String> setup, Vector<String> vargs, File stdout,
-      File stderr, long logSize, File workDir, Map<String, String> env)
-      throws InterruptedException {
+  void launchJvmAndWait(List<String> setup, Vector<String> vargs, File stdout,
+      File stderr, long logSize, File workDir)
+      throws InterruptedException, IOException {
     jvmManager.launchJvm(this, jvmManager.constructJvmEnv(setup, vargs, stdout,
-        stderr, logSize, workDir, env, conf));
+        stderr, logSize, workDir, conf));
     synchronized (lock) {
       while (!done) {
         lock.wait();
@@ -332,7 +341,7 @@ abstract class TaskRunner extends Thread {
                 .isTaskCleanupTask()), conf);
 
     // write the child's task configuration file to the local disk
-    writeLocalTaskFile(localTaskFile.toString(), conf);
+    JobLocalizer.writeLocalJobFile(localTaskFile, conf);
 
     // Set the final job file in the task. The child needs to know the correct
     // path to job.xml. So set this path accordingly.
@@ -342,16 +351,21 @@ abstract class TaskRunner extends Thread {
   /**
   * @return the shell command used to set resource limits for the child
   *         JVM, or the empty string if no limit is configured
    */
-  private List<String> getVMSetupCmd() {
-    String[] ulimitCmd = Shell.getUlimitMemoryCommand(getChildUlimit(conf));
-    List<String> setup = null;
-    if (ulimitCmd != null) {
-      setup = new ArrayList<String>();
-      for (String arg : ulimitCmd) {
-        setup.add(arg);
-      }
+  private String getVMSetupCmd() {
+    final int ulimit = getChildUlimit(conf);
+    if (ulimit <= 0) {
+      return "";
     }
-    return setup;
+    String[] setup = Shell.getUlimitMemoryCommand(ulimit);
+    StringBuilder command = new StringBuilder();
+    for (String str : setup) {
+      command.append('\'');
+      command.append(str);
+      command.append('\'');
+      command.append(" ");
+    }
+    command.append("\n");
+    return command.toString();
   }
 
   /**
@@ -434,7 +448,7 @@ abstract class TaskRunner extends Thread {
       vargs.add(javaOptsSplit[i]);
     }
 
-    Path childTmpDir = createChildTmpDir(workDir, conf);
+    Path childTmpDir = createChildTmpDir(workDir, conf, false);
     vargs.add("-Djava.io.tmpdir=" + childTmpDir);
 
     // Add classpath.
@@ -483,7 +497,7 @@ abstract class TaskRunner extends Thread {
    * @throws IOException
    */
   static Path createChildTmpDir(File workDir,
-      JobConf conf)
+      JobConf conf, boolean createDir)
       throws IOException {
 
     // add java.io.tmpdir given by mapred.child.tmp
@@ -493,10 +507,13 @@ abstract class TaskRunner extends Thread {
     // if temp directory path is not absolute, prepend it with workDir.
     if (!tmpDir.isAbsolute()) {
       tmpDir = new Path(workDir.toString(), tmp);
-
-      FileSystem localFs = FileSystem.getLocal(conf);
-      if (!localFs.mkdirs(tmpDir) && !localFs.getFileStatus(tmpDir).isDir()) {
-        throw new IOException("Mkdirs failed to create " + tmpDir.toString());
+      if (createDir) {
+        FileSystem localFs = FileSystem.getLocal(conf);
+        if (!localFs.mkdirs(tmpDir) && 
+            !localFs.getFileStatus(tmpDir).isDir()) {
+          throw new IOException("Mkdirs failed to create " +
+              tmpDir.toString());
+        }
       }
     }
     return tmpDir;
@@ -535,9 +552,10 @@ abstract class TaskRunner extends Thread {
     return classPaths;
   }
 
-  private String getVMEnvironment(String errorInfo, String user, File workDir, JobConf conf,
-      Map<String, String> env, TaskAttemptID taskid, long logSize)
-      throws Throwable {
+  private String getVMEnvironment(String errorInfo, String user, File workDir, 
+                                  JobConf conf, Map<String, String> env, 
+                                  TaskAttemptID taskid, long logSize
+                                  ) throws Throwable {
     StringBuffer ldLibraryPath = new StringBuffer();
     ldLibraryPath.append(workDir.toString());
     String oldLdLibraryPath = null;
@@ -547,11 +565,13 @@ abstract class TaskRunner extends Thread {
       ldLibraryPath.append(oldLdLibraryPath);
     }
     env.put("LD_LIBRARY_PATH", ldLibraryPath.toString());
+    env.put(HADOOP_WORK_DIR, workDir.toString());
     //update user configured login-shell properties
     updateUserLoginEnv(errorInfo, user, conf, env);
+    // put jobTokenFile name into env
     String jobTokenFile = conf.get(TokenCache.JOB_TOKENS_FILENAME);
-    LOG.debug("putting jobToken file name into environment fn=" + jobTokenFile);
-    env.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION, jobTokenFile);    
+    LOG.debug("putting jobToken file name into environment " + jobTokenFile);
+    env.put(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION, jobTokenFile);
     // for the child of task jvm, set hadoop.root.logger
     env.put("HADOOP_ROOT_LOGGER","INFO,TLA");
     String hadoopClientOpts = System.getenv("HADOOP_CLIENT_OPTS");
@@ -560,9 +580,9 @@ abstract class TaskRunner extends Thread {
     } else {
       hadoopClientOpts = hadoopClientOpts + " ";
     }
-    hadoopClientOpts = hadoopClientOpts + "-Dhadoop.tasklog.taskid=" + taskid
-                       + " -Dhadoop.tasklog.iscleanup=" + t.isTaskCleanupTask()
-                       + " -Dhadoop.tasklog.totalLogFileSize=" + logSize;
+    hadoopClientOpts = hadoopClientOpts + "-Dhadoop.tasklog.taskid=" +
+      taskid + " -Dhadoop.tasklog.iscleanup=" + t.isTaskCleanupTask() +
+      " -Dhadoop.tasklog.totalLogFileSize=" + logSize;
     // following line is a backport from jira MAPREDUCE-1286 
     env.put("HADOOP_CLIENT_OPTS", hadoopClientOpts);
 
@@ -638,25 +658,6 @@ abstract class TaskRunner extends Thread {
     return errorInfo;
   }
 
-  /**
-   * Write the task specific job-configuration file.
-   * 
-   * @param localFs
-   * @throws IOException
-   */
-  private static void writeLocalTaskFile(String jobFile, JobConf conf)
-      throws IOException {
-    Path localTaskFile = new Path(jobFile);
-    FileSystem localFs = FileSystem.getLocal(conf);
-    localFs.delete(localTaskFile, true);
-    OutputStream out = localFs.create(localTaskFile);
-    try {
-      conf.writeXml(out);
-    } finally {
-      out.close();
-    }
-  }
-
   /**
    * Prepare the mapred.local.dir for the child. The child is sand-boxed now.
    * Whenever it uses LocalDirAllocator from now on inside the child, it will
@@ -681,15 +682,11 @@ abstract class TaskRunner extends Thread {
   }
 
   /** Creates the working directory pathname for a task attempt. */ 
-  static File formWorkDir(LocalDirAllocator lDirAlloc, 
-      TaskAttemptID task, boolean isCleanup, JobConf conf) 
+  static Path formWorkDir(LocalDirAllocator lDirAlloc, JobConf conf) 
       throws IOException {
     Path workDir =
-        lDirAlloc.getLocalPathToRead(TaskTracker.getTaskWorkDir(
-            conf.getUser(), task.getJobID().toString(), task.toString(),
-            isCleanup), conf);
-
-    return new File(workDir.toString());
+        lDirAlloc.getLocalPathToRead(MRConstants.WORKDIR, conf);
+    return workDir;
   }
 
   private static void appendSystemClasspaths(List<String> classPaths) {
@@ -780,7 +777,7 @@ abstract class TaskRunner extends Thread {
       }
     }
 
-    createChildTmpDir(workDir, conf);
+    createChildTmpDir(workDir, conf, true);
   }
 
   /**
@@ -804,8 +801,10 @@ abstract class TaskRunner extends Thread {
 
   /**
    * Kill the child process
+   * @throws InterruptedException 
+   * @throws IOException 
    */
-  public void kill() {
+  public void kill() throws IOException, InterruptedException {
     killed = true;
     jvmManager.taskKilled(this);
     signalDone();

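Because the child is now launched through a script executed by the
task-controller rather than inheriting the TaskTracker's environment,
TaskRunner flattens the env map into export lines that run before the JVM
starts. A simplified restatement of that flattening; it assumes values
contain no embedded double quotes:

    // Each environment variable becomes one setup command in the script.
    List<String> setupCmds = new ArrayList<String>();
    for (Map.Entry<String, String> e : env.entrySet()) {
      setupCmds.add("export " + e.getKey() + "=\"" + e.getValue() + "\"");
    }
    setupCmds.add(setup);   // ulimit command from getVMSetupCmd(), if any
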
+ 275 - 372
src/mapred/org/apache/hadoop/mapred/TaskTracker.java

@@ -55,6 +55,7 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.filecache.TaskDistributedCacheManager;
 import org.apache.hadoop.filecache.TrackerDistributedCacheManager;
 import org.apache.hadoop.mapreduce.server.tasktracker.*;
 import org.apache.hadoop.mapreduce.server.tasktracker.userlogs.*;
@@ -66,17 +67,15 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.mapred.QueueManager.QueueACL;
-import org.apache.hadoop.mapred.TaskController.JobInitializationContext;
 import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext;
-import org.apache.hadoop.mapred.TaskController.TaskControllerPathDeletionContext;
-import org.apache.hadoop.mapred.TaskController.TaskControllerTaskPathDeletionContext;
-import org.apache.hadoop.mapred.TaskController.TaskControllerJobPathDeletionContext;
+import org.apache.hadoop.mapred.TaskController.DeletionContext;
 import org.apache.hadoop.mapred.TaskLog.LogFileDetail;
 import org.apache.hadoop.mapred.TaskLog.LogName;
 import org.apache.hadoop.mapred.TaskStatus.Phase;
@@ -157,14 +156,15 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
   public static final Log ClientTraceLog =
     LogFactory.getLog(TaskTracker.class.getName() + ".clienttrace");
 
-  // Job ACLs file is created by TaskTracker under userlogs/$jobid directory for
-  // each job at job localization time. This will be used by TaskLogServlet for
-  // authorizing viewing of task logs of that job
+  // Job ACLs file is created by TaskController under the userlogs/$jobid
+  // directory for each job at job localization time. It is used by
+  // TaskLogServlet to authorize viewing of that job's task logs.
   static String jobACLsFile = "job-acls.xml";
 
   volatile boolean running = true;
 
   private LocalDirAllocator localDirAllocator;
+  private String[] localdirs;
   String taskTrackerName;
   String localHostname;
   InetSocketAddress jobTrackAddr;
@@ -242,9 +242,11 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
   static final String DISTCACHEDIR = "distcache";
   static final String JOBCACHE = "jobcache";
   static final String OUTPUT = "output";
-  private static final String JARSDIR = "jars";
+  static final String JARSDIR = "jars";
   static final String LOCAL_SPLIT_FILE = "split.info";
   static final String JOBFILE = "job.xml";
+  static final String TT_PRIVATE_DIR = "ttprivate";
+  static final String JVM_EXTRA_ENV_FILE = "jvm.extra.env";
 
   static final String JOB_LOCAL_DIR = "job.local.dir";
   static final String JOB_TOKEN_FILE="jobToken"; //localized file
@@ -440,18 +442,28 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
   static String getLocalJobConfFile(String user, String jobid) {
     return getLocalJobDir(user, jobid) + Path.SEPARATOR + TaskTracker.JOBFILE;
   }
+  
+  static String getPrivateDirJobConfFile(String user, String jobid) {
+    return TT_PRIVATE_DIR + Path.SEPARATOR + getLocalJobConfFile(user, jobid);
+  }
 
   static String getTaskConfFile(String user, String jobid, String taskid,
       boolean isCleanupAttempt) {
     return getLocalTaskDir(user, jobid, taskid, isCleanupAttempt)
     + Path.SEPARATOR + TaskTracker.JOBFILE;
   }
+  
+  static String getPrivateDirTaskScriptLocation(String user, String jobid, 
+     String taskid) {
+    return TT_PRIVATE_DIR + Path.SEPARATOR + 
+           getLocalTaskDir(user, jobid, taskid);
+  }
 
   static String getJobJarsDir(String user, String jobid) {
     return getLocalJobDir(user, jobid) + Path.SEPARATOR + TaskTracker.JARSDIR;
   }
 
-  static String getJobJarFile(String user, String jobid) {
+  public static String getJobJarFile(String user, String jobid) {
     return getJobJarsDir(user, jobid) + Path.SEPARATOR + "job.jar";
   }
   
@@ -470,7 +482,8 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     + TaskTracker.OUTPUT;
   }
 
-  static String getLocalTaskDir(String user, String jobid, String taskid) {
+  public static String getLocalTaskDir(String user, String jobid, 
+      String taskid) {
     return getLocalTaskDir(user, jobid, taskid, false);
   }
   
@@ -492,6 +505,15 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
   static String getLocalJobTokenFile(String user, String jobid) {
     return getLocalJobDir(user, jobid) + Path.SEPARATOR + TaskTracker.JOB_TOKEN_FILE;
   }
+  
+  static String getPrivateDirJobTokenFile(String user, String jobid) {
+    return TT_PRIVATE_DIR + Path.SEPARATOR + 
+           getLocalJobTokenFile(user, jobid); 
+  }
+  
+  static String getPrivateDirForJob(String user, String jobid) {
+    return TT_PRIVATE_DIR + Path.SEPARATOR + getLocalJobDir(user, jobid);
+  }
 
   private FileSystem getFS(final Path filePath, JobID jobId,
       final Configuration conf) throws IOException, InterruptedException {
@@ -522,6 +544,23 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     }
   }
 
+  /**
+   * Delete all of the user directories.
+   * @param conf the TT configuration
+   * @throws IOException
+   */
+  private void deleteUserDirectories(Configuration conf) throws IOException {
+    for(String root: localdirs) {
+      for(FileStatus status: localFs.listStatus(new Path(root, SUBDIR))) {
+        String owner = status.getOwner();
+        String path = status.getPath().getName();
+        // getName() returns the final path component, so compare directly
+        if (path.equals(owner)) {
+          taskController.deleteAsUser(owner, "");
+        }
+      }
+    }
+  }
+
   public static final String TT_USER_NAME = "mapreduce.tasktracker.kerberos.principal";
   public static final String TT_KEYTAB_FILE =
     "mapreduce.tasktracker.keytab.file";  
@@ -548,8 +587,18 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     }
  
     //check local disk
-    checkLocalDirs(this.fConf.getLocalDirs());
+    checkLocalDirs((localdirs = this.fConf.getLocalDirs()));
+    deleteUserDirectories(fConf);
     fConf.deleteLocalFiles(SUBDIR);
+    final FsPermission ttdir = FsPermission.createImmutable((short) 0755);
+    for (String s : localdirs) {
+      localFs.mkdirs(new Path(s, SUBDIR), ttdir);
+    }
+    fConf.deleteLocalFiles(TT_PRIVATE_DIR);
+    final FsPermission priv = FsPermission.createImmutable((short) 0700);
+    for (String s : localdirs) {
+      localFs.mkdirs(new Path(s, TT_PRIVATE_DIR), priv);
+    }
 
     // Clear out state tables
     this.tasks.clear();
@@ -608,14 +657,6 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     this.taskTrackerName = "tracker_" + localHostname + ":" + taskReportAddress;
     LOG.info("Starting tracker " + taskTrackerName);
 
-    Class<? extends TaskController> taskControllerClass = fConf.getClass(
-        "mapred.task.tracker.task-controller", DefaultTaskController.class, TaskController.class);
-    taskController = (TaskController) ReflectionUtils.newInstance(
-        taskControllerClass, fConf);
-
-    // setup and create jobcache directory with appropriate permissions
-    taskController.setup();
-
     // Initialize DistributedCache
     this.distributedCacheManager = new TrackerDistributedCacheManager(
         this.fConf, taskController);
@@ -651,7 +692,7 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     reduceLauncher.start();
 
     // create a localizer instance
-    setLocalizer(new Localizer(localFs, fConf.getLocalDirs(), taskController));
+    setLocalizer(new Localizer(localFs, fConf.getLocalDirs()));
 
     //Start up node health checker service.
     if (shouldStartHealthMonitor(this.fConf)) {
@@ -897,25 +938,18 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     JobID jobId = t.getJobID();
     RunningJob rjob = addTaskToJob(jobId, tip);
 
-    // Initialize the user directories if needed.
-    getLocalizer().initializeUserDirs(t.getUser());
-
     synchronized (rjob) {
       if (!rjob.localized) {
-        JobConf localJobConf = localizeJobFiles(t, rjob);
-        // initialize job log directory
-        initializeJobLogDir(jobId, localJobConf);
-
-        // Now initialize the job via task-controller so as to set
-        // ownership/permissions of jars, job-work-dir. Note that initializeJob
-        // should be the last call after every other directory/file to be
-        // directly under the job directory is created.
-        JobInitializationContext context = new JobInitializationContext();
-        context.jobid = jobId;
-        context.user = t.getUser();
-        context.workDir = new File(localJobConf.get(JOB_LOCAL_DIR));
-        taskController.initializeJob(context);
-        
+        Path localJobConfPath = initializeJob(t, rjob);
+        JobConf localJobConf = new JobConf(localJobConfPath);
+        // to be doubly sure, overwrite the user in the config with the
+        // one the TT thinks it is
+        localJobConf.setUser(t.getUser());
+        // also reset the #tasks per jvm
+        resetNumTasksPerJvm(localJobConf);
+        // set the base jobconf path in rjob; all tasks will use
+        // this as the base path when they run
+        rjob.localizedJobConf = localJobConfPath;
         rjob.jobConf = localJobConf;  
         rjob.keepJobFiles = ((localJobConf.getKeepTaskFilesPattern() != null) ||
                              localJobConf.getKeepFailedTaskFiles());
@@ -930,35 +964,31 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
    * Localize the job on this tasktracker. Specifically
    * <ul>
    * <li>Cleanup and create job directories on all disks</li>
+   * <li>Download the credentials file</li>
    * <li>Download the job config file job.xml from the FS</li>
-   * <li>Create the job work directory and set {@link TaskTracker#JOB_LOCAL_DIR}
-   * in the configuration.
-   * <li>Download the job jar file job.jar from the FS, unjar it and set jar
-   * file in the configuration.</li>
+   * <li>Invokes the {@link TaskController} to do the rest of the job 
+   * initialization</li>
    * </ul>
    *
    * @param t task whose job has to be localized on this TT
-   * @return the modified job configuration to be used for all the tasks of this
-   *         job as a starting point.
+   * @param rjob the {@link RunningJob}
+   * @return the path to the job configuration to be used for all the tasks
+   *         of this job as a starting point.
    * @throws IOException
    */
-  JobConf localizeJobFiles(Task t, RunningJob rjob)
-      throws IOException, InterruptedException {
-    JobID jobId = t.getJobID();
+  Path initializeJob(final Task t, RunningJob rjob)
+  throws IOException, InterruptedException {
+    final JobID jobId = t.getJobID();
+
+    final Path jobFile = new Path(t.getJobFile());
+    final String userName = t.getUser();
+    final Configuration conf = getJobConf();
 
-    Path jobFile = new Path(t.getJobFile());
-    String userName = t.getUser();
-    JobConf userConf = new JobConf(getJobConf());
-    
-    // Initialize the job directories first
-    FileSystem localFs = FileSystem.getLocal(fConf);
-    getLocalizer().initializeJobDirs(userName, jobId);
-    
     // save local copy of JobToken file
-    String localJobTokenFile = localizeJobTokenFile(t.getUser(), jobId);
+    final String localJobTokenFile = localizeJobTokenFile(t.getUser(), jobId);
     rjob.ugi = UserGroupInformation.createRemoteUser(t.getUser());
-    
-    Credentials ts = TokenCache.loadTokens(localJobTokenFile, fConf);
+
+    Credentials ts = TokenCache.loadTokens(localJobTokenFile, conf);
     Token<JobTokenIdentifier> jt = TokenCache.getJobToken(ts);
     if (jt != null) { //could be null in the case of some unit tests
       getJobTokenSecretManager().addTokenForJob(jobId.toString(), jt);
@@ -967,107 +997,108 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
       rjob.ugi.addToken(token);
     }
 
-    FileSystem userFs = getFS(jobFile, jobId, userConf);
+    FileSystem userFs = getFS(jobFile, jobId, conf);
 
     // Download the job.xml for this job from the system FS
-    Path localJobFile =
-        localizeJobConfFile(new Path(t.getJobFile()), userName, userFs, jobId);
-
+    final Path localJobFile =
+      localizeJobConfFile(new Path(t.getJobFile()), userName, userFs, jobId);
     JobConf localJobConf = new JobConf(localJobFile);
-    //WE WILL TRUST THE USERNAME THAT WE GOT FROM THE JOBTRACKER
-    //AS PART OF THE TASK OBJECT
-    localJobConf.setUser(userName);
+
+    // Setup the public distributed cache
+    TaskDistributedCacheManager taskDistributedCacheManager =
+      getTrackerDistributedCacheManager()
+     .newTaskDistributedCacheManager(jobId, localJobConf);
+    rjob.distCacheMgr = taskDistributedCacheManager;
+    taskDistributedCacheManager.setupCache(TaskTracker.getPublicDistributedCacheDir(),
+        TaskTracker.getPrivateDistributedCacheDir(userName));
+
+    // Set some config values
+    localJobConf.set(JobConf.MAPRED_LOCAL_DIR_PROPERTY,
+        getJobConf().get(JobConf.MAPRED_LOCAL_DIR_PROPERTY));
+    if (conf.get("slave.host.name") != null) {
+      localJobConf.set("slave.host.name", conf.get("slave.host.name"));
+    }
+    resetNumTasksPerJvm(localJobConf);
+    localJobConf.setUser(t.getUser());
+
+    // write back the config (this config will have the updates that the
+    // distributed cache manager makes as well)
+    JobLocalizer.writeLocalJobFile(localJobFile, localJobConf);
+
+    /**
+     * Now initialize the job via the task-controller to do the rest of the
+     * job-init. Do this within a doAs, since the distributed cache is also
+     * set up in {@link TaskController#initializeJob}. To support potential
+     * authenticated HDFS accesses, we need the tokens.
+     */
+    rjob.ugi.doAs(new PrivilegedExceptionAction<Object>() {
+      public Object run() throws IOException {
+        try {
+          taskController.initializeJob(t.getUser(), jobId.toString(), 
+              new Path(localJobTokenFile), localJobFile, TaskTracker.this);
+        } catch (IOException e) {
+          try {
+            // called holding lock on RunningJob
+            removeJobFiles(t.getUser(), jobId);
+          } catch (IOException e2) {
+            LOG.warn("Failed to add " + jobId + " to cleanup queue", e2);
+          }
+          throw e;
+        }
+        return null;
+      }
+    });
+    // Locate the conf that initializeJob created; some settings (distributed
+    // cache, profiling, etc.) must be read back from that initialized conf
+    Path initializedConf = lDirAlloc.getLocalPathToRead(getLocalJobConfFile(
+           userName, jobId.toString()), getJobConf());
+    return initializedConf;
+  }
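+
For context, the doAs wrapper above is the heart of the fix: any HDFS or local-FS access made during job init must run with the job owner's identity and tokens, not the daemon's. A minimal standalone sketch of the same pattern (the user name and path are illustrative, not from this patch):

    import java.security.PrivilegedExceptionAction;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.security.UserGroupInformation;

    public class DoAsSketch {
      public static void main(String[] args) throws Exception {
        // Impersonate the job owner; tokens added to this UGI travel with it
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
          public Void run() throws Exception {
            // Everything in here executes as "alice", not as the daemon user
            FileSystem fs = FileSystem.getLocal(new Configuration());
            fs.mkdirs(new Path("/tmp/alice-scratch"));
            return null;
          }
        });
      }
    }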
+  
+  /** If certain configs are enabled, JVM reuse should be disabled.
+   * @param localJobConf the localized job configuration to adjust
+   */
+  static void resetNumTasksPerJvm(JobConf localJobConf) {
+    boolean debugEnabled = false;
+    if (localJobConf.getNumTasksToExecutePerJvm() == 1) {
+      return;
+    }
+    if (localJobConf.getMapDebugScript() != null || 
+        localJobConf.getReduceDebugScript() != null) {
+      debugEnabled = true;
+    }
+    String keepPattern = localJobConf.getKeepTaskFilesPattern();
     
-    // set the location of the token file into jobConf to transfer 
-    // the name to TaskRunner
-    localJobConf.set(TokenCache.JOB_TOKENS_FILENAME, localJobTokenFile);
-    // create the 'job-work' directory: job-specific shared directory for use as
-    // scratch space by all tasks of the same job running on this TaskTracker.
-    Path workDir =
-        lDirAlloc.getLocalPathForWrite(getJobWorkDir(userName,
-            jobId.toString()), fConf);
-    if (!localFs.mkdirs(workDir)) {
-      throw new IOException("Mkdirs failed to create "
-          + workDir.toString());
-    }
-    System.setProperty(JOB_LOCAL_DIR, workDir.toUri().getPath());
-    localJobConf.set(JOB_LOCAL_DIR, workDir.toUri().getPath());
-
-    // Download the job.jar for this job from the system FS
-    localizeJobJarFile(userName, jobId, userFs, localJobConf);
-
-    return localJobConf;
-  }
-
-  // Create job userlog dir.
-  // Create job acls file in job log dir, if needed.
-  void initializeJobLogDir(JobID jobId, JobConf localJobConf)
+    if (debugEnabled || localJobConf.getProfileEnabled() ||
+        keepPattern != null || localJobConf.getKeepFailedTaskFiles()) {
+      //disable jvm reuse
+      localJobConf.setNumTasksToExecutePerJvm(1);
+    }
+  }
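+
A hedged usage sketch of the helper above (it is package-private, so this assumes code living in org.apache.hadoop.mapred; the job settings are illustrative):

    package org.apache.hadoop.mapred;  // resetNumTasksPerJvm is package-private

    public class JvmReuseSketch {
      public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.setNumTasksToExecutePerJvm(-1);       // ask for unbounded JVM reuse
        conf.setMapDebugScript("./debug-map.sh");  // hypothetical debug script
        TaskTracker.resetNumTasksPerJvm(conf);
        // A debug script is set, so reuse drops back to one task per JVM
        System.out.println(conf.getNumTasksToExecutePerJvm()); // prints 1
      }
    }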
+
+  // Remove the log dir from the tasklog cleanup thread
+  void saveLogDir(JobID jobId, JobConf localJobConf)
       throws IOException {
     // remove it from tasklog cleanup thread first,
     // it might be added there because of tasktracker reinit or restart
     JobStartedEvent jse = new JobStartedEvent(jobId);
     getUserLogManager().addLogEvent(jse);
-    localizer.initializeJobLogDir(jobId);
-
-    if (areACLsEnabled()) {
-      // Create job-acls.xml file in job userlog dir and write the needed
-      // info for authorization of users for viewing task logs of this job.
-      writeJobACLs(localJobConf, TaskLog.getJobDir(jobId));
-    }
-  }
-
-  /**
-   *  Creates job-acls.xml under the given directory logDir and writes
-   *  job-view-acl, queue-admins-acl, jobOwner name and queue name into this
-   *  file.
-   *  queue name is the queue to which the job was submitted to.
-   *  queue-admins-acl is the queue admins ACL of the queue to which this
-   *  job was submitted to.
-   * @param conf   job configuration
-   * @param logDir job userlog dir
-   * @throws IOException
-   */
-  private static void writeJobACLs(JobConf conf, File logDir) throws IOException {
-    File aclFile = new File(logDir, jobACLsFile);
-    JobConf aclConf = new JobConf(false);
-
-    // set the job view acl in aclConf
-    String jobViewACL = conf.get(JobContext.JOB_ACL_VIEW_JOB, " ");
-    aclConf.set(JobContext.JOB_ACL_VIEW_JOB, jobViewACL);
-
-    // set the job queue name in aclConf
-    String queue = conf.getQueueName();
-    aclConf.setQueueName(queue);
-
-    // set the queue admins acl in aclConf
-    String qACLName = QueueManager.toFullPropertyName(queue,
-        QueueACL.ADMINISTER_JOBS.getAclName());
-    String queueAdminsACL = conf.get(qACLName, " ");
-    aclConf.set(qACLName, queueAdminsACL);
-
-    // set jobOwner as user.name in aclConf
-    String jobOwner = conf.getUser();
-    aclConf.set("user.name", jobOwner);
-
-    FileOutputStream out = new FileOutputStream(aclFile);
-    try {
-      aclConf.writeXml(out);
-    } finally {
-      out.close();
-    }
-    Localizer.PermissionsHandler.setPermissions(aclFile,
-        Localizer.PermissionsHandler.sevenZeroZero);
   }
 
+  
   /**
    * Download the job configuration file from the FS.
    *
-   * @param t Task whose job file has to be downloaded
-   * @param jobId jobid of the task
+   * @param jobFile the original location of the configuration file
+   * @param user the user in question
+   * @param userFs the FileSystem created on behalf of the user
+   * @param jobId jobid in question
    * @return the local file system path of the downloaded file.
    * @throws IOException
    */
-  private Path localizeJobConfFile(Path jobFile, String user, FileSystem userFs, JobID jobId)
+  private Path localizeJobConfFile(Path jobFile, String user, 
+      FileSystem userFs, JobID jobId)
   throws IOException {
     // Get sizes of JobFile and JarFile
     // sizes are -1 if they are not present.
@@ -1080,7 +1111,7 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
       jobFileSize = -1;
     }
     Path localJobFile =
-      lDirAlloc.getLocalPathForWrite(getLocalJobConfFile(user,
+      lDirAlloc.getLocalPathForWrite(getPrivateDirJobConfFile(user,
           jobId.toString()), jobFileSize, fConf);
 
     // Download job.xml
@@ -1088,58 +1119,16 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     return localJobFile;
   }
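+
LocalDirAllocator does the heavy lifting here: the write path reserves space on a disk that can hold the file, and a later read resolves the same relative path regardless of which disk was chosen. A minimal sketch, assuming mapred.local.dir names writable directories:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocalDirAllocator;
    import org.apache.hadoop.fs.Path;

    public class DirAllocSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("mapred.local.dir", "/data/1/mapred,/data/2/mapred"); // illustrative
        LocalDirAllocator alloc = new LocalDirAllocator("mapred.local.dir");
        // Reserve room for a 64KB job.xml on whichever disk has the space
        Path dst = alloc.getLocalPathForWrite("jobcache/job_0001/job.xml",
                                              64 * 1024, conf);
        FileSystem.getLocal(conf).create(dst).close();  // write something there
        // Readers find the file again without knowing which disk won
        Path src = alloc.getLocalPathToRead("jobcache/job_0001/job.xml", conf);
        System.out.println(dst + " / " + src);
      }
    }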
 
-  /**
-   * Download the job jar file from FS to the local file system and unjar it.
-   * Set the local jar file in the passed configuration.
-   *
-   * @param jobId
-   * @param userFs
-   * @param localJobConf
-   * @throws IOException
-   */
-  private void localizeJobJarFile(String user, JobID jobId, FileSystem userFs,
-      JobConf localJobConf)
-  throws IOException {
-    // copy Jar file to the local FS and unjar it.
-    String jarFile = localJobConf.getJar();
-    FileStatus status = null;
-    long jarFileSize = -1;
-    if (jarFile != null) {
-      Path jarFilePath = new Path(jarFile);
-      try {
-        status = userFs.getFileStatus(jarFilePath);
-        jarFileSize = status.getLen();
-      } catch (FileNotFoundException fe) {
-        jarFileSize = -1;
-      }
-      // Here we check for five times the size of jarFileSize to accommodate for
-      // unjarring the jar file in the jars directory
-      Path localJarFile =
-        lDirAlloc.getLocalPathForWrite(
-            getJobJarFile(user, jobId.toString()), 5 * jarFileSize, fConf);
-
-      //Download job.jar
-      userFs.copyToLocalFile(jarFilePath, localJarFile);
-
-      localJobConf.setJar(localJarFile.toString());
-
-      // Also un-jar the job.jar files. We un-jar it so that classes inside
-      // sub-directories, for e.g., lib/, classes/ are available on class-path
-      RunJar.unJar(new File(localJarFile.toString()), new File(localJarFile
-          .getParent().toString()));
-    }
-  }
-
   private void launchTaskForJob(TaskInProgress tip, JobConf jobConf,
-      UserGroupInformation ugi) throws IOException {
+                                RunningJob rjob) throws IOException {
     synchronized (tip) {
       tip.setJobConf(jobConf);
-      tip.setUGI(ugi);
-      tip.launchTask();
+      tip.setUGI(rjob.ugi);
+      tip.launchTask(rjob);
     }
   }
     
-  public synchronized void shutdown() throws IOException {
+  public synchronized void shutdown() throws IOException, InterruptedException {
     shuttingDown = true;
     close();
     if (this.server != null) {
@@ -1156,8 +1145,9 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
    * any running tasks or threads, and cleanup disk space.  A new TaskTracker
    * within the same process space might be restarted, so everything must be
    * clean.
+   * @throws InterruptedException 
    */
-  public synchronized void close() throws IOException {
+  public synchronized void close() throws IOException, InterruptedException {
     //
     // Kill running tasks.  Do this in a 2nd vector, called 'tasksToClose',
     // because calling jobHasFinished() may result in an edit to 'tasks'.
@@ -1246,8 +1236,15 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     // objects
     FileSystem local = FileSystem.getLocal(conf);
     this.localDirAllocator = new LocalDirAllocator("mapred.local.dir");
+    Class<? extends TaskController> taskControllerClass = 
+      conf.getClass("mapred.task.tracker.task-controller", 
+                     DefaultTaskController.class, TaskController.class);
+    taskController =
+      (TaskController) ReflectionUtils.newInstance(taskControllerClass, conf);
+    taskController.setup(localDirAllocator);
+
     // create user log manager
-    setUserLogManager(new UserLogManager(conf));
+    setUserLogManager(new UserLogManager(conf, taskController));
     SecurityUtil.login(originalConf, TT_KEYTAB_FILE, TT_USER_NAME);
 
     initialize();
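 
The controller is pluggable via the mapred.task.tracker.task-controller key, with ReflectionUtils injecting the Configuration into whatever class is named. A sketch of opting in to the setuid controller (cluster-specific setup for the native binary is omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.DefaultTaskController;
    import org.apache.hadoop.mapred.LinuxTaskController;
    import org.apache.hadoop.mapred.TaskController;
    import org.apache.hadoop.util.ReflectionUtils;

    public class ControllerSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Name the setuid controller; DefaultTaskController is the fallback
        conf.setClass("mapred.task.tracker.task-controller",
                      LinuxTaskController.class, TaskController.class);
        Class<? extends TaskController> cls =
            conf.getClass("mapred.task.tracker.task-controller",
                          DefaultTaskController.class, TaskController.class);
        // newInstance also pushes conf into the Configurable controller
        TaskController controller = ReflectionUtils.newInstance(cls, conf);
        System.out.println(controller.getClass().getName());
      }
    }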
@@ -1687,70 +1684,6 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     }
   }
 
-  /**
-   * Builds list of PathDeletionContext objects for the given paths
-   */
-  private static PathDeletionContext[] buildPathDeletionContexts(FileSystem fs,
-      Path[] paths) {
-    int i = 0;
-    PathDeletionContext[] contexts = new PathDeletionContext[paths.length];
-
-    for (Path p : paths) {
-      contexts[i++] = new PathDeletionContext(fs, p.toUri().getPath());
-    }
-    return contexts;
-  }
-
-  /**
-   * Builds list of {@link TaskControllerJobPathDeletionContext} objects for a 
-   * job each pointing to the job's jobLocalDir.
-   * @param fs    : FileSystem in which the dirs to be deleted
-   * @param paths : mapred-local-dirs
-   * @param id    : {@link JobID} of the job for which the local-dir needs to 
-   *                be cleaned up.
-   * @param user  : Job owner's username
-   * @param taskController : the task-controller to be used for deletion of
-   *                         jobLocalDir
-   */
-  static PathDeletionContext[] buildTaskControllerJobPathDeletionContexts(
-      FileSystem fs, Path[] paths, JobID id, String user,
-      TaskController taskController)
-      throws IOException {
-    int i = 0;
-    PathDeletionContext[] contexts =
-                          new TaskControllerPathDeletionContext[paths.length];
-
-    for (Path p : paths) {
-      contexts[i++] = new TaskControllerJobPathDeletionContext(fs, p, id, user,
-                                                               taskController);
-    }
-    return contexts;
-  } 
-  
-  /**
-   * Builds list of TaskControllerTaskPathDeletionContext objects for a task
-   * @param fs    : FileSystem in which the dirs to be deleted
-   * @param paths : mapred-local-dirs
-   * @param task  : the task whose taskDir or taskWorkDir is going to be deleted
-   * @param isWorkDir : the dir to be deleted is workDir or taskDir
-   * @param taskController : the task-controller to be used for deletion of
-   *                         taskDir or taskWorkDir
-   */
-  static PathDeletionContext[] buildTaskControllerTaskPathDeletionContexts(
-      FileSystem fs, Path[] paths, Task task, boolean isWorkDir,
-      TaskController taskController)
-      throws IOException {
-    int i = 0;
-    PathDeletionContext[] contexts =
-                          new TaskControllerPathDeletionContext[paths.length];
-
-    for (Path p : paths) {
-      contexts[i++] = new TaskControllerTaskPathDeletionContext(fs, p, task,
-                          isWorkDir, taskController);
-    }
-    return contexts;
-  }
-
   /**
    * The task tracker is done with this job, so we need to clean up.
    * @param action The action with the job
@@ -1778,7 +1711,7 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
         }
         // Delete the job directory for this  
         // task if the job is done/failed
-        if (!rjob.keepJobFiles) {
+        if (!rjob.keepJobFiles && rjob.localized) {
           removeJobFiles(rjob.jobConf.getUser(), rjob.getJobID());
         }
         // add job to user log manager
@@ -1812,12 +1745,21 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
    * @param rjob
    * @throws IOException
    */
-  void removeJobFiles(String user, JobID jobId)
-  throws IOException {
-    PathDeletionContext[] contexts = 
-      buildTaskControllerJobPathDeletionContexts(localFs, 
-          getLocalFiles(fConf, ""), jobId, user, taskController);
-    directoryCleanupThread.addToQueue(contexts);
+  void removeJobFiles(String user, JobID jobId) throws IOException {
+    String userDir = getUserDir(user);
+    String jobDir = getLocalJobDir(user, jobId.toString());
+    PathDeletionContext jobCleanup = 
+      new TaskController.DeletionContext(getTaskController(), false, user, 
+                                         jobDir.substring(userDir.length()));
+    directoryCleanupThread.addToQueue(jobCleanup);
+    
+    for (String str : localdirs) {
+      Path ttPrivateJobDir = FileSystem.getLocal(fConf).makeQualified(
+        new Path(str, TaskTracker.getPrivateDirForJob(user, jobId.toString())));
+      PathDeletionContext ttPrivateJobCleanup =
+        new CleanupQueue.PathDeletionContext(ttPrivateJobDir, fConf);
+      directoryCleanupThread.addToQueue(ttPrivateJobCleanup);
+    }
   }
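 
The second loop queues plain PathDeletionContexts because the ttprivate copies are owned by the tasktracker itself, so no setuid helper is needed. A hedged sketch of that simpler path (CleanupQueue is package-local, so this assumes org.apache.hadoop.mapred; the directory is illustrative):

    package org.apache.hadoop.mapred;  // CleanupQueue is package-local

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CleanupSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        CleanupQueue queue = new CleanupQueue();  // deletes on a daemon thread
        Path ttPrivate = FileSystem.getLocal(conf).makeQualified(
            new Path("/data/1/mapred/ttprivate/taskTracker/alice/jobcache/job_0001"));
        queue.addToQueue(new CleanupQueue.PathDeletionContext(ttPrivate, conf));
      }
    }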
 
   /**
@@ -2113,12 +2055,14 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
    * Start a new task.
    * All exceptions are handled locally, so that we don't mess up the
    * task tracker.
+   * @throws InterruptedException 
    */
-  void startNewTask(TaskInProgress tip) {
+  void startNewTask(TaskInProgress tip) throws InterruptedException {
     try {
       RunningJob rjob = localizeJob(tip);
+      tip.getTask().setJobFile(rjob.localizedJobConf.toString());
       // Localization is done. Neither rjob.jobConf nor rjob.ugi can be null
-      launchTaskForJob(tip, new JobConf(rjob.jobConf), rjob.ugi); 
+      launchTaskForJob(tip, new JobConf(rjob.jobConf), rjob); 
     } catch (Throwable e) {
       String msg = ("Error initializing " + tip.getTask().getTaskID() + 
                     ":\n" + StringUtils.stringifyException(e));
@@ -2128,8 +2072,9 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
         tip.kill(true);
         tip.cleanup(true);
       } catch (IOException ie2) {
-        LOG.info("Error cleaning up " + tip.getTask().getTaskID() + ":\n" +
-                 StringUtils.stringifyException(ie2));          
+        LOG.info("Error cleaning up " + tip.getTask().getTaskID(), ie2);
+      } catch (InterruptedException ie2) {
+        LOG.info("Error cleaning up " + tip.getTask().getTaskID(), ie2);
       }
         
       // Careful! 
@@ -2238,7 +2183,7 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     private TaskRunner runner;
     volatile boolean done = false;
     volatile boolean wasKilled = false;
-    private JobConf defaultJobConf;
+    private JobConf ttConf;
     private JobConf localJobConf;
     private boolean keepFailedTaskFiles;
     private boolean alwaysKeepTaskFiles;
@@ -2270,7 +2215,7 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
       this.task = task;
       this.launcher = launcher;
       this.lastProgressReport = System.currentTimeMillis();
-      this.defaultJobConf = conf;
+      this.ttConf = conf;
       localJobConf = null;
       taskStatus = TaskStatus.createTaskStatus(task.isMapTask(), task.getTaskID(), 
                                                0.0f, 
@@ -2289,69 +2234,10 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
         
     void localizeTask(Task task) throws IOException{
 
-      FileSystem localFs = FileSystem.getLocal(fConf);
-
-      // create taskDirs on all the disks.
-      getLocalizer().initializeAttemptDirs(task.getUser(),
-          task.getJobID().toString(), task.getTaskID().toString(),
-          task.isTaskCleanupTask());
-
-      // create the working-directory of the task 
-      Path cwd =
-          lDirAlloc.getLocalPathForWrite(getTaskWorkDir(task.getUser(), task
-              .getJobID().toString(), task.getTaskID().toString(), task
-              .isTaskCleanupTask()), defaultJobConf);
-      if (!localFs.mkdirs(cwd)) {
-        throw new IOException("Mkdirs failed to create " 
-                    + cwd.toString());
-      }
-
-      localJobConf.set("mapred.local.dir",
-                       fConf.get("mapred.local.dir"));
-
-      if (fConf.get("slave.host.name") != null) {
-        localJobConf.set("slave.host.name",
-                         fConf.get("slave.host.name"));
-      }
-            
-      keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles();
-
       // Do the task-type specific localization
+      // TODO: are these calls really required?
       task.localizeConfiguration(localJobConf);
       
-      List<String[]> staticResolutions = NetUtils.getAllStaticResolutions();
-      if (staticResolutions != null && staticResolutions.size() > 0) {
-        StringBuffer str = new StringBuffer();
-
-        for (int i = 0; i < staticResolutions.size(); i++) {
-          String[] hostToResolved = staticResolutions.get(i);
-          str.append(hostToResolved[0]+"="+hostToResolved[1]);
-          if (i != staticResolutions.size() - 1) {
-            str.append(',');
-          }
-        }
-        localJobConf.set("hadoop.net.static.resolutions", str.toString());
-      }
-      if (task.isMapTask()) {
-        debugCommand = localJobConf.getMapDebugScript();
-      } else {
-        debugCommand = localJobConf.getReduceDebugScript();
-      }
-      String keepPattern = localJobConf.getKeepTaskFilesPattern();
-      if (keepPattern != null) {
-        alwaysKeepTaskFiles = 
-          Pattern.matches(keepPattern, task.getTaskID().toString());
-      } else {
-        alwaysKeepTaskFiles = false;
-      }
-      if (debugCommand != null || localJobConf.getProfileEnabled() ||
-          alwaysKeepTaskFiles || keepFailedTaskFiles) {
-        //disable jvm reuse
-        localJobConf.setNumTasksToExecutePerJvm(1);
-      }
-      if (isTaskMemoryManagerEnabled()) {
-        localJobConf.setBoolean("task.memory.mgmt.enabled", true);
-      }
       task.setConf(localJobConf);
     }
         
@@ -2374,6 +2260,18 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
       keepFailedTaskFiles = localJobConf.getKeepFailedTaskFiles();
       taskTimeout = localJobConf.getLong("mapred.task.timeout", 
                                          10 * 60 * 1000);
+      if (task.isMapTask()) {
+        debugCommand = localJobConf.getMapDebugScript();
+      } else {
+        debugCommand = localJobConf.getReduceDebugScript();
+      }
+      String keepPattern = localJobConf.getKeepTaskFilesPattern();
+      if (keepPattern != null) {
+        alwaysKeepTaskFiles = 
+          Pattern.matches(keepPattern, task.getTaskID().toString());
+      } else {
+        alwaysKeepTaskFiles = false;
+      }
     }
         
     public synchronized JobConf getJobConf() {
@@ -2394,7 +2292,7 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     /**
      * Kick off the task execution
      */
-    public synchronized void launchTask() throws IOException {
+    public synchronized void launchTask(RunningJob rjob) throws IOException {
       if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED ||
           this.taskStatus.getRunState() == TaskStatus.State.FAILED_UNCLEAN ||
           this.taskStatus.getRunState() == TaskStatus.State.KILLED_UNCLEAN) {
@@ -2402,7 +2300,7 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
         if (this.taskStatus.getRunState() == TaskStatus.State.UNASSIGNED) {
           this.taskStatus.setRunState(TaskStatus.State.RUNNING);
         }
-        setTaskRunner(task.createRunner(TaskTracker.this, this));
+        setTaskRunner(task.createRunner(TaskTracker.this, this, rjob));
         this.runner.start();
         this.taskStatus.setStartTime(System.currentTimeMillis());
       } else {
@@ -2466,8 +2364,10 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
               "Groups=" + taskCounters.getGroupNames().size() + " Limit=" +
               Counters.MAX_GROUP_LIMIT);
           kill(true);
-        } catch(IOException ie) {
-          LOG.error("Error killing task " + task.getTaskID(), ie);
+        } catch (IOException e) {
+          LOG.error("Error killing task " + task.getTaskID(), e);
+        } catch (InterruptedException e) {
+          LOG.error("Error killing task " + task.getTaskID(), e);
         }
       }
       
@@ -2828,7 +2728,12 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
             getRunState() == TaskStatus.State.UNASSIGNED ||
             getRunState() == TaskStatus.State.COMMIT_PENDING ||
             isCleaningup()) {
-          kill(wasFailure);
+          try {
+            kill(wasFailure);
+          } catch (InterruptedException e) {
+            throw new IOException("Interrupted while killing " +
+                getTask().getTaskID(), e);
+          }
         }
       }
       
@@ -2839,8 +2744,10 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     /**
      * Something went wrong and the task must be killed.
      * @param wasFailure was it a failure (versus a kill request)?
+     * @throws InterruptedException 
      */
-    public synchronized void kill(boolean wasFailure) throws IOException {
+    public synchronized void kill(boolean wasFailure
+                                  ) throws IOException, InterruptedException {
       if (taskStatus.getRunState() == TaskStatus.State.RUNNING ||
           taskStatus.getRunState() == TaskStatus.State.COMMIT_PENDING ||
           isCleaningup()) {
@@ -2942,7 +2849,7 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
           return;
         }
         try {
-          removeTaskFiles(needCleanup, taskId);
+          removeTaskFiles(needCleanup);
         } catch (Throwable ie) {
           LOG.info("Error cleaning up task runner: "
               + StringUtils.stringifyException(ie));
@@ -2954,47 +2861,27 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
      * Some or all of the files from this task are no longer required. Remove
      * them via CleanupQueue.
      * 
-     * @param needCleanup
+     * @param removeOutputs whether to remove the task's entire local directory
+     *                      (outputs included) rather than only its work dir
-     * @param taskId
      * @throws IOException 
      */
-    void removeTaskFiles(boolean needCleanup, TaskAttemptID taskId)
-        throws IOException {
-      if (needCleanup) {
-        if (runner != null) {
-          // cleans up the output directory of the task (where map outputs
-          // and reduce inputs get stored)
-          runner.close();
-        }
-
-        if (localJobConf.getNumTasksToExecutePerJvm() == 1) {
-          // No jvm reuse, remove everything
-          PathDeletionContext[] contexts =
-            buildTaskControllerTaskPathDeletionContexts(localFs,
-                getLocalFiles(fConf, ""), task, false/* not workDir */,
-                taskController);
-          directoryCleanupThread.addToQueue(contexts);
+    void removeTaskFiles(boolean removeOutputs) throws IOException {
+      if (localJobConf.getNumTasksToExecutePerJvm() == 1) {
+        String user = ugi.getShortUserName();
+        int userDirLen = TaskTracker.getUserDir(user).length();
+        String jobId = task.getJobID().toString();
+        String taskId = task.getTaskID().toString();
+        boolean cleanup = task.isTaskCleanupTask();
+        String taskDir;
+        if (!removeOutputs) {
+          taskDir = TaskTracker.getTaskWorkDir(user, jobId, taskId, cleanup);
         } else {
-          // Jvm reuse. We don't delete the workdir since some other task
-          // (running in the same JVM) might be using the dir. The JVM
-          // running the tasks would clean the workdir per a task in the
-          // task process itself.
-          String localTaskDir =
-            getLocalTaskDir(task.getUser(), task.getJobID().toString(), taskId
-                .toString(), task.isTaskCleanupTask());
-          PathDeletionContext[] contexts = buildPathDeletionContexts(
-              localFs, getLocalFiles(defaultJobConf, localTaskDir +
-                         Path.SEPARATOR + TaskTracker.JOBFILE));
-          directoryCleanupThread.addToQueue(contexts);
-        }
-      } else {
-        if (localJobConf.getNumTasksToExecutePerJvm() == 1) {
-          PathDeletionContext[] contexts =
-            buildTaskControllerTaskPathDeletionContexts(localFs,
-              getLocalFiles(fConf, ""), task, true /* workDir */,
-              taskController);
-          directoryCleanupThread.addToQueue(contexts);
+          taskDir = TaskTracker.getLocalTaskDir(user, jobId, taskId, cleanup);
         }
+        PathDeletionContext item =
+          new TaskController.DeletionContext(taskController, false, user,
+                                             taskDir.substring(userDirLen));          
+        directoryCleanupThread.addToQueue(item);
       }
     }
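 
The substring arithmetic above hands the setuid binary a path relative to the user's directory on each disk, so the tasktracker never passes an absolute path the binary would have to trust. A tiny worked example with hypothetical names:

    public class RelativePathSketch {
      public static void main(String[] args) {
        String userDir = "taskTracker/alice";                       // hypothetical
        String taskDir = userDir + "/jobcache/job_0001/attempt_0001_m_000000_0";
        // Strip the per-user prefix; the controller re-anchors it per local dir
        String relative = taskDir.substring(userDir.length());
        System.out.println(relative);  // /jobcache/job_0001/attempt_0001_m_000000_0
      }
    }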
         
@@ -3033,7 +2920,11 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     if (rjob == null) { //kill the JVM since the job is dead
       LOG.info("Killing JVM " + jvmId + " since job " + jvmId.getJobId() +
                " is dead");
-      jvmManager.killJvm(jvmId);
+      try {
+        jvmManager.killJvm(jvmId);
+      } catch (InterruptedException e) {
+        LOG.warn("Failed to kill " + jvmId, e);
+      }
       return new JvmTask(null, true);
     }
     TaskInProgress tip = jvmManager.getTaskForJvm(jvmId);
@@ -3226,12 +3117,15 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
   static class RunningJob{
     private JobID jobid; 
     private JobConf jobConf;
+    private Path localizedJobConf;
     // keep this for later use
     volatile Set<TaskInProgress> tasks;
     boolean localized;
     boolean keepJobFiles;
     UserGroupInformation ugi;
     FetchStatus f;
+    TaskDistributedCacheManager distCacheMgr;
+    
     RunningJob(JobID jobid) {
       this.jobid = jobid;
       localized = false;
@@ -3850,7 +3744,7 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
       jobTokenSize = status.getLen();
       
       Path localJobTokenFile =
-          lDirAlloc.getLocalPathForWrite(getLocalJobTokenFile(user, 
+          lDirAlloc.getLocalPathForWrite(getPrivateDirJobTokenFile(user, 
               jobId.toString()), jobTokenSize, fConf);
     
       String localJobTokenFileStr = localJobTokenFile.toUri().getPath();
@@ -3936,4 +3830,13 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol,
     return map;
   }
  // End MXBean implementation
+
+  @Override
+  public void 
+  updatePrivateDistributedCacheSizes(org.apache.hadoop.mapreduce.JobID jobId,
+                                     long[] sizes
+                                     ) throws IOException {
+    distributedCacheManager.setArchiveSizes(jobId, sizes);
+  }
+
 }

+ 9 - 0
src/mapred/org/apache/hadoop/mapred/TaskUmbilicalProtocol.java

@@ -157,4 +157,13 @@ public interface TaskUmbilicalProtocol extends VersionedProtocol {
                                                        TaskAttemptID id) 
   throws IOException;
 
+  /**
+   * The job initializer needs to report the sizes of the archive
+   * objects in the private distributed cache.
+   * @param jobId the job to update
+   * @param sizes the array of sizes that were computed
+   * @throws IOException
+   */
+  void updatePrivateDistributedCacheSizes(org.apache.hadoop.mapreduce.JobID jobId,
+                                          long[] sizes) throws IOException;
 }
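 
The caller of this new method is the user-side job initializer, which measures the private cache files it just downloaded and reports back so the tasktracker can account for cache usage. A hedged sketch, assuming umbilical is an in-scope TaskUmbilicalProtocol proxy and the sizes are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
    import org.apache.hadoop.mapreduce.JobID;

    public class CacheSizeReportSketch {
      static void report(TaskUmbilicalProtocol umbilical) throws IOException {
        JobID jobId = JobID.forName("job_201009162306_0001");  // illustrative
        long[] sizes = { 4096L, 1048576L };  // bytes per localized archive
        umbilical.updatePrivateDistributedCacheSizes(jobId, sizes);
      }
    }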

+ 23 - 22
src/mapred/org/apache/hadoop/mapred/UserLogCleaner.java

@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.JobID;
@@ -118,7 +119,7 @@ public class UserLogCleaner extends Thread {
   }
 
   public void deleteJobLogs(JobID jobid) throws IOException {
-    deleteLogPath(TaskLog.getJobDir(jobid).getAbsolutePath());
+    deleteLogPath(jobid.toString());
   }
 
   /**
@@ -134,26 +135,22 @@ public class UserLogCleaner extends Thread {
   public void clearOldUserLogs(Configuration conf) throws IOException {
     File userLogDir = TaskLog.getUserLogDir();
     if (userLogDir.exists()) {
-      String[] logDirs = userLogDir.list();
-      if (logDirs.length > 0) {
+      long now = clock.getTime();
+      for(String logDir: userLogDir.list()) {
        // add all the log dirs to the task logs monitor.
-        long now = clock.getTime();
-        for (String logDir : logDirs) {
-          JobID jobid = null;
-          try {
-            jobid = JobID.forName(logDir);
-          } catch (IllegalArgumentException ie) {
-            // if the directory is not a jobid, delete it immediately
-            deleteLogPath(new File(userLogDir, logDir).getAbsolutePath());
-            continue;
-          }
-          // add the job log directory for deletion with default retain hours,
-          // if it is not already added
-          if (!completedJobs.containsKey(jobid)) {
-            JobCompletedEvent jce = new JobCompletedEvent(jobid, now,
-                getUserlogRetainHours(conf));
-            userLogManager.addLogEvent(jce);
-          }
+        JobID jobid = null;
+        try {
+          jobid = JobID.forName(logDir);
+        } catch (IllegalArgumentException ie) {
+          deleteLogPath(logDir);
+          continue;
+        }
+        // add the job log directory for deletion with default retain hours,
+        // if it is not already added
+        if (!completedJobs.containsKey(jobid)) {
+          JobCompletedEvent jce = 
+            new JobCompletedEvent(jobid, now, getUserlogRetainHours(conf));
+          userLogManager.addLogEvent(jce);
         }
       }
     }
@@ -208,7 +205,11 @@ public class UserLogCleaner extends Thread {
    */
   private void deleteLogPath(String logPath) throws IOException {
     LOG.info("Deleting user log path " + logPath);
-    PathDeletionContext context = new PathDeletionContext(localFs, logPath);
-    cleanupQueue.addToQueue(context);
+    String logRoot = TaskLog.getUserLogDir().toString();
+    String user = localFs.getFileStatus(new Path(logRoot, logPath)).getOwner();
+    TaskController controller = userLogManager.getTaskController();
+    PathDeletionContext item = 
+      new TaskController.DeletionContext(controller, true, user, logPath);
+    cleanupQueue.addToQueue(item);
   }
 }
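 
Since log directories are owned by the users whose tasks wrote them, deleteLogPath first asks the local filesystem who owns the path and then routes the deletion through the task-controller as that user. A small sketch of the ownership lookup (paths illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class LogOwnerSketch {
      public static void main(String[] args) throws Exception {
        FileSystem localFs = FileSystem.getLocal(new Configuration());
        Path logDir = new Path("/var/log/hadoop/userlogs", "job_201009162306_0001");
        // The owner determines which user the controller deletes the dir as
        String owner = localFs.getFileStatus(logDir).getOwner();
        System.out.println(owner);
      }
    }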

+ 1 - 1
src/mapred/org/apache/hadoop/mapreduce/security/TokenCache.java

@@ -156,7 +156,7 @@ public class TokenCache {
    * @throws IOException
    */
   //@InterfaceAudience.Private
-  public static Credentials loadTokens(String jobTokenFile, JobConf conf) 
+  public static Credentials loadTokens(String jobTokenFile, Configuration conf) 
   throws IOException {
     Path localJobTokenFile = new Path ("file:///" + jobTokenFile);
     

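With the parameter widened to Configuration, callers that never build a JobConf (like the tasktracker path earlier in this patch) can load the token file directly. A minimal sketch with a hypothetical token-file location:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.security.TokenCache;
    import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.token.Token;

    public class TokenLoadSketch {
      public static void main(String[] args) throws Exception {
        Credentials creds = TokenCache.loadTokens(
            "/data/1/mapred/ttprivate/taskTracker/alice/jobcache/job_0001/jobToken",
            new Configuration());
        // May be null outside a real cluster, as the TaskTracker code notes
        Token<JobTokenIdentifier> jt = TokenCache.getJobToken(creds);
        System.out.println(jt);
      }
    }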
+ 4 - 16
src/mapred/org/apache/hadoop/mapreduce/server/tasktracker/Localizer.java

@@ -31,7 +31,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.TaskController;
 import org.apache.hadoop.mapred.TaskLog;
 import org.apache.hadoop.mapred.TaskTracker;
-import org.apache.hadoop.mapred.TaskController.InitializationContext;
 import org.apache.hadoop.mapreduce.JobID;
 
 /**
@@ -44,19 +43,16 @@ public class Localizer {
 
   private FileSystem fs;
   private String[] localDirs;
-  private TaskController taskController;
 
   /**
    * Create a Localizer instance
    * 
    * @param fileSys
    * @param lDirs
-   * @param tc
    */
-  public Localizer(FileSystem fileSys, String[] lDirs, TaskController tc) {
+  public Localizer(FileSystem fileSys, String[] lDirs) {
     fs = fileSys;
     localDirs = lDirs;
-    taskController = tc;
   }
 
   /**
@@ -264,13 +260,6 @@ public class Localizer {
                 + user);
       }
 
-      // Now, run the task-controller specific code to initialize the
-      // user-directories.
-      InitializationContext context = new InitializationContext();
-      context.user = user;
-      context.workDir = null;
-      taskController.initializeUser(context);
-
       // Localization of the user is done
       localizedUser.set(true);
     }
@@ -283,7 +272,7 @@ public class Localizer {
    * <br>
    * Here, we set 700 permissions on the job directories created on all disks.
    * This we do so as to avoid any misuse by other users till the time
-   * {@link TaskController#initializeJob(JobInitializationContext)} is run at a
+   * {@link TaskController#initializeJob} is run at a
    * later time to set proper private permissions on the job directories. <br>
    * 
    * @param user
@@ -331,16 +320,15 @@ public class Localizer {
    * @param user
    * @param jobId
    * @param attemptId
-   * @param isCleanupAttempt
    * @throws IOException
    */
   public void initializeAttemptDirs(String user, String jobId,
-      String attemptId, boolean isCleanupAttempt)
+      String attemptId)
       throws IOException {
 
     boolean initStatus = false;
     String attemptDirPath =
-        TaskTracker.getLocalTaskDir(user, jobId, attemptId, isCleanupAttempt);
+        TaskTracker.getLocalTaskDir(user, jobId, attemptId);
 
     for (String localDir : localDirs) {
       Path localAttemptDir = new Path(localDir, attemptDirPath);

+ 40 - 5
src/mapred/org/apache/hadoop/mapreduce/server/tasktracker/userlogs/UserLogManager.java

@@ -24,9 +24,12 @@ import java.util.concurrent.LinkedBlockingQueue;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.DefaultTaskController;
+import org.apache.hadoop.mapred.TaskController;
 import org.apache.hadoop.mapred.TaskLogsTruncater;
 import org.apache.hadoop.mapred.TaskTracker;
 import org.apache.hadoop.mapred.UserLogCleaner;
+import org.apache.hadoop.util.ReflectionUtils;
 
 /**
  * This manages user logs on the {@link TaskTracker}.
@@ -37,6 +40,7 @@ public class UserLogManager {
     new LinkedBlockingQueue<UserLogEvent>();
   private TaskLogsTruncater taskLogsTruncater;
   private UserLogCleaner userLogCleaner;
+  private final TaskController taskController;
 
   private Thread monitorLogEvents = new Thread() {
     @Override
@@ -56,17 +60,49 @@ public class UserLogManager {
    * 
    * It should be explicitly started using {@link #start()} to start functioning
    * 
-   * @param conf
-   *          The {@link Configuration}
+   * @param conf The {@link Configuration}
+   * @param taskController The task controller to delete the log files
+   * 
+   * @throws IOException
+   */
+  public UserLogManager(Configuration conf,
+                        TaskController taskController) throws IOException {
+    this.taskController = taskController;
+    setFields(conf);
+  }
+  
+  /**
+   * Create the user log manager to manage user logs on {@link TaskTracker}.
+   * This constructor is there mainly for unit tests.
    * 
+   * @param conf The {@link Configuration}
+   *
    * @throws IOException
    */
   public UserLogManager(Configuration conf) throws IOException {
+    Class<? extends TaskController> taskControllerClass = 
+      conf.getClass("mapred.task.tracker.task-controller", 
+                     DefaultTaskController.class, TaskController.class);
+    TaskController taskController = 
+     (TaskController) ReflectionUtils.newInstance(taskControllerClass, conf);
+    this.taskController = taskController;
+    setFields(conf);
+  }
+  
+  private void setFields(Configuration conf) throws IOException {
     taskLogsTruncater = new TaskLogsTruncater(conf);
     userLogCleaner = new UserLogCleaner(this, conf);
     monitorLogEvents.setDaemon(true);
   }
 
+  /**
+   * Get the taskController for deleting logs.
+   * @return the TaskController
+   */
+  public TaskController getTaskController() {
+    return taskController;
+  }
+
   /**
    * Starts managing the logs
    */
@@ -101,13 +137,12 @@ public class UserLogManager {
    *          TT's conf
    * @throws IOException
    */
-  public void clearOldUserLogs(Configuration conf)
-      throws IOException {
+  public void clearOldUserLogs(Configuration conf) throws IOException {
     userLogCleaner.clearOldUserLogs(conf);
   }
 
   private void doJvmFinishedAction(JvmFinishedEvent event) {
-    taskLogsTruncater.truncateLogs(event.getJvmInfo());
+    // do nothing
   }
 
   private void doJobStartedAction(JobStartedEvent event) {

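Putting the two constructors side by side: production code passes in the controller the tasktracker already built, while the conf-only constructor (meant for unit tests) builds one itself. A usage sketch with the default controller:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.DefaultTaskController;
    import org.apache.hadoop.mapred.TaskController;
    import org.apache.hadoop.mapreduce.server.tasktracker.userlogs.UserLogManager;

    public class LogManagerSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        TaskController controller = new DefaultTaskController();
        controller.setConf(conf);  // TaskController is Configurable
        UserLogManager mgr = new UserLogManager(conf, controller);
        mgr.start();  // begin draining queued UserLogEvents on the daemon thread
      }
    }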
+ 103 - 61
src/test/org/apache/hadoop/filecache/TestTrackerDistributedCacheManager.java

@@ -34,6 +34,8 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapred.DefaultTaskController;
+import org.apache.hadoop.mapred.JobID;
+import org.apache.hadoop.mapred.JobLocalizer;
 import org.apache.hadoop.mapred.TaskController;
 import org.apache.hadoop.mapred.TaskTracker;
 import org.apache.hadoop.mapreduce.Job;
@@ -69,7 +71,9 @@ public class TestTrackerDistributedCacheManager extends TestCase {
   private static final int LOCAL_CACHE_SUBDIR = 2;
   protected Configuration conf;
   protected Path firstCacheFile;
+  protected Path firstCacheFilePublic;
   protected Path secondCacheFile;
+  protected Path secondCacheFilePublic;
   private FileSystem fs;
 
   protected LocalDirAllocator localDirAllocator = 
@@ -107,18 +111,22 @@ public class TestTrackerDistributedCacheManager extends TestCase {
         taskControllerClass, conf);
 
     // setup permissions for mapred local dir
-    taskController.setup();
+    taskController.setup(localDirAllocator);
 
     // Create the temporary cache files to be used in the tests.
     firstCacheFile = new Path(TEST_ROOT_DIR, "firstcachefile");
     secondCacheFile = new Path(TEST_ROOT_DIR, "secondcachefile");
+    firstCacheFilePublic = new Path(TEST_ROOT_DIR, "firstcachefileOne");
+    secondCacheFilePublic = new Path(TEST_ROOT_DIR, "secondcachefileOne");
+    createPublicTempFile(firstCacheFilePublic);
+    createPublicTempFile(secondCacheFilePublic);
     createPrivateTempFile(firstCacheFile);
     createPrivateTempFile(secondCacheFile);
   }
   
   protected void refreshConf(Configuration conf) throws IOException {
     taskController.setConf(conf);
-    taskController.setup();
+    taskController.setup(localDirAllocator);
   }
 
   /**
@@ -146,6 +154,7 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     Configuration subConf = new Configuration(conf);
     String userName = getJobOwnerName();
     subConf.set("user.name", userName);
+    JobID jobid = new JobID("jt",1);
     DistributedCache.addCacheFile(firstCacheFile.toUri(), subConf);
     DistributedCache.addFileToClassPath(secondCacheFile, subConf, 
                                         FileSystem.get(subConf));
@@ -162,13 +171,16 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     TrackerDistributedCacheManager manager = 
       new TrackerDistributedCacheManager(conf, taskController);
     TaskDistributedCacheManager handle =
-      manager.newTaskDistributedCacheManager(subConf);
+      manager.newTaskDistributedCacheManager(jobid, subConf);
     assertNull(null, DistributedCache.getLocalCacheFiles(subConf));
     File workDir = new File(new Path(TEST_ROOT_DIR, "workdir").toString());
-    handle.setup(localDirAllocator, workDir, TaskTracker
-        .getPrivateDistributedCacheDir(userName), 
-        TaskTracker.getPublicDistributedCacheDir());
-    // ****** End of imitating TaskRunner code
+    handle.setupCache(TaskTracker.getPublicDistributedCacheDir(), 
+        TaskTracker.getPrivateDistributedCacheDir(userName));
+    JobLocalizer.downloadPrivateCache(subConf);
+    // DOESN'T ACTUALLY HAPPEN IN THE TaskRunner (THIS IS A TODO)
+//    handle.setupPrivateCache(localDirAllocator, TaskTracker
+//        .getPrivateDistributedCacheDir(userName));
+//    // ****** End of imitating TaskRunner code
 
     Path[] localCacheFiles = DistributedCache.getLocalCacheFiles(subConf);
     assertNotNull(null, localCacheFiles);
@@ -201,14 +213,17 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     }
 
     @Override
-    Path localizeCache(Configuration conf, URI cache, long confFileStamp,
-        CacheStatus cacheStatus, FileStatus fileStatus, boolean isArchive,
-        boolean isPublic) throws IOException {
-      if (cache.equals(firstCacheFile.toUri())) {
+    Path localizePublicCacheObject(Configuration conf, URI cache, 
+                                   long confFileStamp,
+                                   CacheStatus cacheStatus, 
+                                   FileStatus fileStatus, 
+                                   boolean isArchive) throws IOException {
+      if (cache.equals(firstCacheFilePublic.toUri())) {
         throw new IOException("fake fail");
       }
-      return super.localizeCache(conf, cache, confFileStamp, cacheStatus,
-          fileStatus, isArchive, isPublic);
+      return super.localizePublicCacheObject(conf, cache, confFileStamp, 
+                                             cacheStatus, fileStatus, 
+                                             isArchive);
     }
   }
 
@@ -234,10 +249,10 @@ public class TestTrackerDistributedCacheManager extends TestCase {
 
     // Task localizing for first job
     TaskDistributedCacheManager handle = manager
-        .newTaskDistributedCacheManager(conf1);
-    handle.setup(localDirAllocator, workDir, TaskTracker
-          .getPrivateDistributedCacheDir(userName), 
-          TaskTracker.getPublicDistributedCacheDir());
+        .newTaskDistributedCacheManager(new JobID("jt", 1), conf1);
+    handle.setupCache(TaskTracker.getPublicDistributedCacheDir(), 
+        TaskTracker.getPrivateDistributedCacheDir(userName));
+    JobLocalizer.downloadPrivateCache(conf1);
     handle.release();
     for (TaskDistributedCacheManager.CacheFile c : handle.getCacheFiles()) {
       assertEquals(0, manager.getReferenceCount(c.uri, conf1, c.timestamp, 
@@ -252,7 +267,7 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     Configuration conf2 = job2.getConfiguration();
     conf2.set("user.name", userName);
     // add a file that would get failed to localize
-    DistributedCache.addCacheFile(firstCacheFile.toUri(), conf2);
+    DistributedCache.addCacheFile(firstCacheFilePublic.toUri(), conf2);
     // add a file that is already localized by different job
     DistributedCache.addCacheFile(secondCacheFile.toUri(), conf2);
     // add a file that is never localized
@@ -263,12 +278,12 @@ public class TestTrackerDistributedCacheManager extends TestCase {
 
     // Task localizing for second job
     // localization for the "firstCacheFile" will fail.
-    handle = manager.newTaskDistributedCacheManager(conf2);
+    handle = manager.newTaskDistributedCacheManager(new JobID("jt", 2), conf2);
     Throwable th = null;
     try {
-      handle.setup(localDirAllocator, workDir, TaskTracker
-          .getPrivateDistributedCacheDir(userName), 
-          TaskTracker.getPublicDistributedCacheDir());
+      handle.setupCache(TaskTracker.getPublicDistributedCacheDir(),
+          TaskTracker.getPrivateDistributedCacheDir(userName));
+      JobLocalizer.downloadPrivateCache(conf2);
     } catch (IOException e) {
       th = e;
       LOG.info("Exception during setup", e);
@@ -316,6 +331,26 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     }
   }
 
+  private void appendBooleanArray(StringBuilder buffer, boolean[] data) {
+    if (data != null && data.length != 0) {
+      buffer.append(data[0]);
+      for(int i=1; i < data.length; i++) {
+        buffer.append(',');
+        buffer.append(data[i]);
+      }
+    }
+  }
+
+  private void appendLongArray(StringBuilder buffer, long[] data) {
+    if (data != null && data.length != 0) {
+      buffer.append(data[0]);
+      for(int i=1; i < data.length; i++) {
+        buffer.append(',');
+        buffer.append(data[i]);
+      }
+    }
+  }
+
   private void appendUriArray(StringBuilder buffer, URI[] data) {
     if (data != null && data.length != 0) {
       buffer.append(data[0]);
@@ -333,15 +368,15 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     buf.append("\nArchives:");
     appendUriArray(buf, DistributedCache.getCacheArchives(conf1));
     buf.append("\nFile Visible:");
-    appendStringArray(buf, TrackerDistributedCacheManager.getFileVisibilities
+    appendBooleanArray(buf, TrackerDistributedCacheManager.getFileVisibilities
                       (conf1));
     buf.append("\nArchive Visible:");
-    appendStringArray(buf, TrackerDistributedCacheManager.getArchiveVisibilities
+    appendBooleanArray(buf, TrackerDistributedCacheManager.getArchiveVisibilities
                       (conf1));
     buf.append("\nFile timestamps:");
-    appendStringArray(buf, DistributedCache.getFileTimestamps(conf1));
+    appendLongArray(buf, DistributedCache.getFileTimestamps(conf1));
     buf.append("\nArchive timestamps:");
-    appendStringArray(buf, DistributedCache.getArchiveTimestamps(conf1));
+    appendLongArray(buf, DistributedCache.getArchiveTimestamps(conf1));
     LOG.info("state = " + buf.toString());
   }
   
@@ -367,10 +402,10 @@ public class TestTrackerDistributedCacheManager extends TestCase {
 
     // Task localizing for job
     TaskDistributedCacheManager handle = manager
-        .newTaskDistributedCacheManager(conf1);
-    handle.setup(localDirAllocator, workDir, TaskTracker
-          .getPrivateDistributedCacheDir(userName), 
-          TaskTracker.getPublicDistributedCacheDir());
+        .newTaskDistributedCacheManager(new JobID("jt", 1), conf1);
+    handle.setupCache(TaskTracker.getPublicDistributedCacheDir(),
+        TaskTracker.getPrivateDistributedCacheDir(userName));
+    JobLocalizer.downloadPrivateCache(conf1);
     TaskDistributedCacheManager.CacheFile c = handle.getCacheFiles().get(0);
     String distCacheDir;
     if (visibility) {
@@ -381,8 +416,7 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     Path localizedPath =
       manager.getLocalCache(cacheFile.toUri(), conf1, distCacheDir,
           fs.getFileStatus(cacheFile), false,
-          c.timestamp, new Path(TEST_ROOT_DIR), false,
-          visibility);
+          c.timestamp, visibility);
     assertTrue("Cache file didn't get localized in the expected directory. " +
         "Expected localization to happen within " + 
         ROOT_MAPRED_LOCAL_DIR + "/" + distCacheDir +
@@ -494,20 +528,21 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     conf2.set("user.name", userName);
 
     // We first test the size limit
-    Path firstLocalCache = manager.getLocalCache(firstCacheFile.toUri(), conf2, 
+    Path firstLocalCache = manager.getLocalCache(firstCacheFilePublic.toUri(), conf2, 
         TaskTracker.getPrivateDistributedCacheDir(userName),
-        fs.getFileStatus(firstCacheFile), false,
-        now, new Path(TEST_ROOT_DIR), false, false);
-    manager.releaseCache(firstCacheFile.toUri(), conf2, now, 
+        fs.getFileStatus(firstCacheFilePublic), false,
+        fs.getFileStatus(firstCacheFilePublic).getModificationTime(), true);
+    manager.releaseCache(firstCacheFilePublic.toUri(), conf2, 
+        fs.getFileStatus(firstCacheFilePublic).getModificationTime(), 
         TrackerDistributedCacheManager.getLocalizedCacheOwner(false));
     //in above code,localized a file of size 4K and then release the cache 
     // which will cause the cache be deleted when the limit goes out. 
     // The below code localize another cache which's designed to
     //sweep away the first cache.
-    Path secondLocalCache = manager.getLocalCache(secondCacheFile.toUri(), conf2, 
+    Path secondLocalCache = manager.getLocalCache(secondCacheFilePublic.toUri(), conf2, 
         TaskTracker.getPrivateDistributedCacheDir(userName),
-        fs.getFileStatus(secondCacheFile), false, 
-        System.currentTimeMillis(), new Path(TEST_ROOT_DIR), false, false);
+        fs.getFileStatus(secondCacheFilePublic), false, 
+        fs.getFileStatus(secondCacheFilePublic).getModificationTime(), true);
     assertFalse("DistributedCache failed deleting old" + 
         " cache when the cache store is full.",
         localfs.exists(firstLocalCache));
@@ -530,7 +565,6 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     System.err.println("That directory ends up with "
                        + localfs.listStatus(firstCursor).length
                        + " subdirectories");
-
     Path cachesBase = firstCursor;
 
     assertFalse
@@ -545,21 +579,23 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     Path fourthCacheFile = new Path(TEST_ROOT_DIR, "fourthcachefile");
     // Adding two more small files, so it triggers the number of sub directory
     // limit but does not trigger the file size limit.
-    createTempFile(thirdCacheFile, 1);
-    createTempFile(fourthCacheFile, 1);
+    createPublicTempFile(thirdCacheFile);
+    createPublicTempFile(fourthCacheFile);
     Path thirdLocalCache = manager.getLocalCache(thirdCacheFile.toUri(), conf2,
         TaskTracker.getPrivateDistributedCacheDir(userName),
         fs.getFileStatus(thirdCacheFile), false,
-        now, new Path(TEST_ROOT_DIR), false, false);
+        fs.getFileStatus(thirdCacheFile).getModificationTime(), 
+        true);
     // Release the third cache so that it can be deleted while sweeping
-    manager.releaseCache(thirdCacheFile.toUri(), conf2, now, 
+    manager.releaseCache(thirdCacheFile.toUri(), conf2, 
+        fs.getFileStatus(thirdCacheFile).getModificationTime(), 
         TrackerDistributedCacheManager.getLocalizedCacheOwner(false));
     // Getting the fourth cache will make the number of sub directories becomes
     // 3 which is greater than 2. So the released cache will be deleted.
     Path fourthLocalCache = manager.getLocalCache(fourthCacheFile.toUri(), conf2, 
         TaskTracker.getPrivateDistributedCacheDir(userName),
         fs.getFileStatus(fourthCacheFile), false, 
-        System.currentTimeMillis(), new Path(TEST_ROOT_DIR), false, false);
+        fs.getFileStatus(fourthCacheFile).getModificationTime(), true);
     assertFalse("DistributedCache failed deleting old" + 
         " cache when the cache exceeds the number of sub directories limit.",
         localfs.exists(thirdLocalCache));
@@ -590,7 +626,7 @@ public class TestTrackerDistributedCacheManager extends TestCase {
         TaskTracker.getPrivateDistributedCacheDir(userName),
         fs.getFileStatus(firstCacheFile), false,
         System.currentTimeMillis(),
-        new Path(TEST_ROOT_DIR), false, false);
+        false);
     assertNotNull("DistributedCache cached file on non-default filesystem.",
         result);
   }
@@ -625,6 +661,8 @@ public class TestTrackerDistributedCacheManager extends TestCase {
   protected void tearDown() throws IOException {
     new File(firstCacheFile.toString()).delete();
     new File(secondCacheFile.toString()).delete();
+    new File(firstCacheFilePublic.toString()).delete();
+    new File(secondCacheFilePublic.toString()).delete();
     FileUtil.fullyDelete(new File(TEST_ROOT_DIR));
   }
 
@@ -677,12 +715,14 @@ public class TestTrackerDistributedCacheManager extends TestCase {
 
     // ****** Imitate TaskRunner code.
     TaskDistributedCacheManager handle =
-      manager.newTaskDistributedCacheManager(subConf);
+      manager.newTaskDistributedCacheManager(new JobID("jt", 1), subConf);
     assertNull(null, DistributedCache.getLocalCacheFiles(subConf));
     File workDir = new File(new Path(TEST_ROOT_DIR, "workdir").toString());
-    handle.setup(localDirAllocator, workDir, TaskTracker
-        .getPrivateDistributedCacheDir(userName), 
-        TaskTracker.getPublicDistributedCacheDir());
+    handle.setupCache(TaskTracker.getPublicDistributedCacheDir(), 
+        TaskTracker.getPrivateDistributedCacheDir(userName));
+    //TODO this doesn't really happen in the TaskRunner
+//    handle.setupPrivateCache(localDirAllocator, TaskTracker
+//        .getPrivateDistributedCacheDir(userName));
     // ****** End of imitating TaskRunner code
 
     Path[] localCacheFiles = DistributedCache.getLocalCacheFiles(subConf);
@@ -702,8 +742,10 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     // running a task of the same job
     Throwable th = null;
     try {
-      handle.setup(localDirAllocator, workDir, TaskTracker
-          .getPrivateDistributedCacheDir(userName), TaskTracker.getPublicDistributedCacheDir());
+      handle.setupCache(TaskTracker.getPublicDistributedCacheDir(),
+          TaskTracker.getPrivateDistributedCacheDir(userName));
+//      handle.setupPrivateCache(localDirAllocator, TaskTracker
+//          .getPrivateDistributedCacheDir(userName));
     } catch (IOException ie) {
       th = ie;
     }
@@ -721,9 +763,9 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     TrackerDistributedCacheManager.determineCacheVisibilities(subConf2);
     
     handle =
-      manager.newTaskDistributedCacheManager(subConf2);
-    handle.setup(localDirAllocator, workDir, TaskTracker
-        .getPrivateDistributedCacheDir(userName), TaskTracker.getPublicDistributedCacheDir());
+      manager.newTaskDistributedCacheManager(new JobID("jt", 2), subConf2);
+    handle.setupCache(TaskTracker.getPublicDistributedCacheDir(), 
+        TaskTracker.getPrivateDistributedCacheDir(userName));
     Path[] localCacheFiles2 = DistributedCache.getLocalCacheFiles(subConf2);
     assertNotNull(null, localCacheFiles2);
     assertEquals(1, localCacheFiles2.length);
@@ -758,20 +800,20 @@ public class TestTrackerDistributedCacheManager extends TestCase {
     long now = System.currentTimeMillis();
 
     Path[] localCache = new Path[2];
-    localCache[0] = manager.getLocalCache(firstCacheFile.toUri(), conf, 
+    localCache[0] = manager.getLocalCache(firstCacheFilePublic.toUri(), conf, 
         TaskTracker.getPrivateDistributedCacheDir(userName),
-        fs.getFileStatus(firstCacheFile), false,
-        now, new Path(TEST_ROOT_DIR), false, false);
+        fs.getFileStatus(firstCacheFilePublic), false,
+        fs.getFileStatus(firstCacheFilePublic).getModificationTime(), true);
     FsPermission myPermission = new FsPermission((short)0600);
     Path myFile = new Path(localCache[0].getParent(), "myfile.txt");
     if (FileSystem.create(localfs, myFile, myPermission) == null) {
       throw new IOException("Could not create " + myFile);
     }
     try {
-      localCache[1] = manager.getLocalCache(secondCacheFile.toUri(), conf, 
+      localCache[1] = manager.getLocalCache(secondCacheFilePublic.toUri(), conf, 
           TaskTracker.getPrivateDistributedCacheDir(userName),
-          fs.getFileStatus(secondCacheFile), false, 
-          System.currentTimeMillis(), new Path(TEST_ROOT_DIR), false, false);
+          fs.getFileStatus(secondCacheFilePublic), false, 
+          fs.getFileStatus(secondCacheFilePublic).getModificationTime(), true);
       FileStatus stat = localfs.getFileStatus(myFile);
       assertTrue(stat.getPermission().equals(myPermission));
       // validate permissions of localized files.
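
For reference, the distributed-cache flow exercised above reduces to the following sketch. It assumes only the signatures visible in this diff (TrackerDistributedCacheManager, TaskDistributedCacheManager, the TaskTracker directory helpers); ttConf, taskController, jobConf, and userName stand in for the test's fixtures:

    // Cache managers are now scoped to a job, not just a conf.
    TrackerDistributedCacheManager manager =
        new TrackerDistributedCacheManager(ttConf, taskController);
    TaskDistributedCacheManager handle =
        manager.newTaskDistributedCacheManager(new JobID("jt", 1), jobConf);

    // One call localizes both public and private entries; the separate
    // setupPrivateCache step is gone, and getLocalCache now keys freshness
    // off the source file's modification time plus an explicit isPublic flag.
    handle.setupCache(TaskTracker.getPublicDistributedCacheDir(),
        TaskTracker.getPrivateDistributedCacheDir(userName));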

+ 3 - 3
src/test/org/apache/hadoop/mapred/ClusterWithLinuxTaskController.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.filecache.TestTrackerDistributedCacheManager;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -74,17 +75,16 @@ public class ClusterWithLinuxTaskController extends TestCase {
         + "/task-controller";
 
     @Override
-    public void setup() throws IOException {
+    public void setup(LocalDirAllocator allocator) throws IOException {
       // get the current ugi and set the task controller group owner
       getConf().set(TT_GROUP, taskTrackerSpecialGroup);
 
       // write configuration file
       configurationFile = createTaskControllerConf(System
           .getProperty(TASKCONTROLLER_PATH), getConf());
-      super.setup();
+      super.setup(allocator);
     }
 
-    @Override
     protected String getTaskControllerExecutablePath() {
       return taskControllerExePath;
     }
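
The setup signature change above follows the same pattern wherever a controller is subclassed: do the subclass's preparation first, then delegate so the base class can validate against the allocator. A minimal sketch (MyTaskController is hypothetical):

    public static class MyTaskController extends DefaultTaskController {
      @Override
      public void setup(LocalDirAllocator allocator) throws IOException {
        // subclass-specific preparation goes here, then delegate so the
        // base class can run its own checks against the local dirs
        super.setup(allocator);
      }
    }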

+ 5 - 3
src/test/org/apache/hadoop/mapred/TestJobTrackerSafeMode.java

@@ -74,7 +74,7 @@ public class TestJobTrackerSafeMode extends TestCase {
    *   - check that after all the trackers are recovered, scheduling is opened 
    */
   private void testSafeMode(MiniDFSCluster dfs, MiniMRCluster mr) 
-  throws IOException {
+  throws IOException, InterruptedException {
     FileSystem fileSys = dfs.getFileSystem();
     JobConf jobConf = mr.createJobConf();
     String mapSignalFile = UtilsForTests.getMapSignalFile(shareDir);
@@ -218,8 +218,9 @@ public class TestJobTrackerSafeMode extends TestCase {
 
   /**
    * Test {@link JobTracker}'s safe mode.
+   * @throws InterruptedException 
    */
-  public void testJobTrackerSafeMode() throws IOException {
+  public void testJobTrackerSafeMode() 
+      throws IOException, InterruptedException {
     String namenode = null;
     MiniDFSCluster dfs = null;
     MiniMRCluster mr = null;
@@ -278,7 +279,8 @@ public class TestJobTrackerSafeMode extends TestCase {
     }
   }
 
-  public static void main(String[] args) throws IOException {
+  public static void main(String[] args) 
+  throws IOException, InterruptedException {
     new TestJobTrackerSafeMode().testJobTrackerSafeMode();
   }
 }

+ 46 - 9
src/test/org/apache/hadoop/mapred/TestJvmManager.java

@@ -24,12 +24,19 @@ import java.io.FileReader;
 import java.io.IOException;
 import java.util.Vector;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.filecache.TrackerDistributedCacheManager;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JvmManager.JvmManagerForType;
 import org.apache.hadoop.mapred.JvmManager.JvmManagerForType.JvmRunner;
+import org.apache.hadoop.mapred.TaskTracker.RunningJob;
 import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
+import org.apache.hadoop.mapred.UtilsForTests.InlineCleanupQueue;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.server.tasktracker.userlogs.UserLogManager;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.After;
 import static org.junit.Assert.*;
 import org.junit.Before;
@@ -43,6 +50,8 @@ public class TestJvmManager {
   private TaskTracker tt;
   private JvmManager jvmManager;
   private JobConf ttConf;
+  private boolean threadCaughtException = false;
+  private String user;
 
   @Before
   public void setUp() {
@@ -55,16 +64,24 @@ public class TestJvmManager {
   }
 
   public TestJvmManager() throws Exception {
+    user = UserGroupInformation.getCurrentUser().getShortUserName();
     tt = new TaskTracker();
     ttConf = new JobConf();
     ttConf.setLong("mapred.tasktracker.tasks.sleeptime-before-sigkill", 2000);
     tt.setConf(ttConf);
     tt.setMaxMapSlots(MAP_SLOTS);
     tt.setMaxReduceSlots(REDUCE_SLOTS);
-    tt.setTaskController(new DefaultTaskController());
+    TaskController dtc = new DefaultTaskController();
+    tt.setTaskController(dtc);
+    Configuration conf = new Configuration();
+    dtc.setConf(conf);
+    LocalDirAllocator ldirAlloc = new LocalDirAllocator("mapred.local.dir");
+    tt.getTaskController().setup(ldirAlloc);
+    JobID jobId = new JobID("test", 0);
     jvmManager = new JvmManager(tt);
     tt.setJvmManagerInstance(jvmManager);
     tt.setUserLogManager(new UserLogManager(ttConf));
+    tt.setCleanupThread(new InlineCleanupQueue());
   }
 
   // write a shell script to execute the command.
@@ -100,15 +117,21 @@ public class TestJvmManager {
     JobConf taskConf = new JobConf(ttConf);
     TaskAttemptID attemptID = new TaskAttemptID("test", 0, true, 0, 0);
     Task task = new MapTask(null, attemptID, 0, null, MAP_SLOTS);
+    task.setUser(user);
     task.setConf(taskConf);
     TaskInProgress tip = tt.new TaskInProgress(task, taskConf);
     File pidFile = new File(TEST_DIR, "pid");
-    final TaskRunner taskRunner = task.createRunner(tt, tip);
+    RunningJob rjob = new RunningJob(attemptID.getJobID());
+    TaskController taskController = new DefaultTaskController();
+    taskController.setConf(ttConf);
+    rjob.distCacheMgr = 
+      new TrackerDistributedCacheManager(ttConf, taskController).
+      newTaskDistributedCacheManager(attemptID.getJobID(), taskConf);
+    final TaskRunner taskRunner = task.createRunner(tt, tip, rjob);
     // launch a jvm which sleeps for 60 seconds
     final Vector<String> vargs = new Vector<String>(2);
     vargs.add(writeScript("SLEEP", "sleep 60\n", pidFile).getAbsolutePath());
     final File workDir = new File(TEST_DIR, "work");
-    workDir.mkdir();
     final File stdout = new File(TEST_DIR, "stdout");
     final File stderr = new File(TEST_DIR, "stderr");
 
@@ -117,10 +140,13 @@ public class TestJvmManager {
       public void run() {
         try {
           taskRunner.launchJvmAndWait(null, vargs, stdout, stderr, 100,
-              workDir, null);
+              workDir);
         } catch (InterruptedException e) {
           e.printStackTrace();
           return;
+        } catch (IOException e) {
+          e.printStackTrace();
+          setThreadCaughtException();
         }
       }
     };
@@ -148,7 +174,14 @@ public class TestJvmManager {
     final JvmRunner jvmRunner = mapJvmManager.jvmIdToRunner.get(jvmid);
     Thread killer = new Thread() {
       public void run() {
-        jvmRunner.kill();
+        try {
+          jvmRunner.kill();
+        } catch (IOException e) {
+          e.printStackTrace();
+          setThreadCaughtException();
+        } catch (InterruptedException e) {
+          e.printStackTrace();
+        }
       }
     };
     killer.start();
@@ -164,21 +197,25 @@ public class TestJvmManager {
     // launch another jvm and see it finishes properly
     attemptID = new TaskAttemptID("test", 0, true, 0, 1);
     task = new MapTask(null, attemptID, 0, null, MAP_SLOTS);
+    task.setUser(user);
     task.setConf(taskConf);
     tip = tt.new TaskInProgress(task, taskConf);
-    TaskRunner taskRunner2 = task.createRunner(tt, tip);
+    TaskRunner taskRunner2 = task.createRunner(tt, tip, rjob);
     // build dummy vargs to call ls
     Vector<String> vargs2 = new Vector<String>(1);
     vargs2.add(writeScript("LS", "ls", pidFile).getAbsolutePath());
     File workDir2 = new File(TEST_DIR, "work2");
-    workDir.mkdir();
     File stdout2 = new File(TEST_DIR, "stdout2");
     File stderr2 = new File(TEST_DIR, "stderr2");
-    taskRunner2.launchJvmAndWait(null, vargs2, stdout2, stderr2, 100, workDir2,
-        null);
+    taskRunner2.launchJvmAndWait(null, vargs2, stdout2, stderr2, 100, workDir2);
     // join all the threads
     killer.join();
     jvmRunner.join();
     launcher.join();
+    assertFalse("Thread caught unexpected IOException", 
+                 threadCaughtException);
+  }
+
+  private void setThreadCaughtException() {
+    threadCaughtException = true;
   }
 }
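
The threadCaughtException flag added above is the standard way to surface failures from helper threads, since JUnit only fails on exceptions thrown from the test thread itself. The pattern, sketched with the names used in this test (join() also publishes the flag to the test thread):

    Thread launcher = new Thread() {
      public void run() {
        try {
          taskRunner.launchJvmAndWait(null, vargs, stdout, stderr, 100,
              workDir);
        } catch (IOException e) {
          e.printStackTrace();
          setThreadCaughtException();  // record it; assert from the test thread
        } catch (InterruptedException e) {
          e.printStackTrace();
        }
      }
    };
    launcher.start();
    // ... exercise the JVM manager ...
    launcher.join();
    assertFalse("Thread caught unexpected IOException", threadCaughtException);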

+ 3 - 7
src/test/org/apache/hadoop/mapred/TestLinuxTaskController.java

@@ -22,6 +22,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.LocalDirAllocator;
 
 import junit.framework.TestCase;
 
@@ -42,11 +43,6 @@ public class TestLinuxTaskController extends TestCase {
 
   public static class MyLinuxTaskController extends LinuxTaskController {
     String taskControllerExePath = taskControllerPath + "/task-controller";
-
-    @Override
-    protected String getTaskControllerExecutablePath() {
-      return taskControllerExePath;
-    }
   }
 
   private void validateTaskControllerSetup(TaskController controller,
@@ -55,7 +51,7 @@ public class TestLinuxTaskController extends TestCase {
       // task controller setup should fail validating permissions.
       Throwable th = null;
       try {
-        controller.setup();
+        controller.setup(new LocalDirAllocator("mapred.local.dir"));
       } catch (IOException ie) {
         th = ie;
       }
@@ -64,7 +60,7 @@ public class TestLinuxTaskController extends TestCase {
           + INVALID_TASKCONTROLLER_PERMISSIONS, th.getMessage().contains(
           "with exit code " + INVALID_TASKCONTROLLER_PERMISSIONS));
     } else {
-      controller.setup();
+      controller.setup(new LocalDirAllocator("mapred.local.dir"));
     }
 
   }

+ 0 - 1
src/test/org/apache/hadoop/mapred/TestMiniMRWithDFS.java

@@ -156,7 +156,6 @@ public class TestMiniMRWithDFS extends TestCase {
           .isDirectory());
       LOG.info("Verifying contents of mapred.local.dir "
           + localDir.getAbsolutePath());
-
       // Verify contents(user-dir) of tracker-sub-dir
       File trackerSubDir = new File(localDir, TaskTracker.SUBDIR);
       if (trackerSubDir.isDirectory()) {

+ 288 - 0
src/test/org/apache/hadoop/mapred/TestTaskCommit.java

@@ -0,0 +1,288 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.BytesWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.SortedRanges.Range;
+import org.apache.hadoop.mapreduce.TaskType;
+
+public class TestTaskCommit extends HadoopTestCase {
+  Path rootDir = 
+    new Path(System.getProperty("test.build.data",  "/tmp"), "test");
+
+  static class CommitterWithCommitFail extends FileOutputCommitter {
+    public void commitTask(TaskAttemptContext context) throws IOException {
+      Path taskOutputPath = getTempTaskOutputPath(context);
+      TaskAttemptID attemptId = context.getTaskAttemptID();
+      JobConf job = context.getJobConf();
+      if (taskOutputPath != null) {
+        FileSystem fs = taskOutputPath.getFileSystem(job);
+        if (fs.exists(taskOutputPath)) {
+          throw new IOException();
+        }
+      }
+    }
+  }
+
+  /**
+   * Special committer that does not clean up temporary files in
+   * abortTask.
+   * 
+   * The framework's FileOutputCommitter cleans up any temporary
+   * files left behind in abortTask. We want the test case to
+   * find these files and hence short-circuit abortTask.
+   */
+  static class CommitterWithoutCleanup extends FileOutputCommitter {
+    @Override
+    public void abortTask(TaskAttemptContext context) throws IOException {
+      // does nothing
+    }
+  }
+  
+  /**
+   * Special committer that always requires commit.
+   */
+  static class CommitterThatAlwaysRequiresCommit extends FileOutputCommitter {
+    @Override
+    public boolean needsTaskCommit(TaskAttemptContext context) 
+      throws IOException {
+      return true;
+    }
+  }
+
+  public TestTaskCommit() throws IOException {
+    super(LOCAL_MR, LOCAL_FS, 1, 1);
+  }
+
+  @Override
+  public void tearDown() throws Exception {
+    super.tearDown();
+    FileUtil.fullyDelete(new File(rootDir.toString()));
+  }
+    
+  public void testCommitFail() throws IOException {
+    Path rootDir = 
+      new Path(System.getProperty("test.build.data",  "/tmp"), "test");
+    final Path inDir = new Path(rootDir, "input");
+    final Path outDir = new Path(rootDir, "output");
+    JobConf jobConf = createJobConf();
+    jobConf.setMaxMapAttempts(1);
+    jobConf.setOutputCommitter(CommitterWithCommitFail.class);
+    RunningJob rJob = UtilsForTests.runJob(jobConf, inDir, outDir, 1, 0);
+    rJob.waitForCompletion();
+    assertEquals(JobStatus.FAILED, rJob.getJobState());
+  }
+  
+  private class MyUmbilical implements TaskUmbilicalProtocol {
+    boolean taskDone = false;
+
+    @Override
+    public boolean canCommit(TaskAttemptID taskid) throws IOException {
+      return false;
+    }
+
+    @Override
+    public void commitPending(TaskAttemptID taskId, TaskStatus taskStatus)
+        throws IOException, InterruptedException {
+      fail("Task should not go to commit-pending");
+    }
+
+    @Override
+    public void done(TaskAttemptID taskid) throws IOException {
+      taskDone = true;
+    }
+
+    @Override
+    public void fatalError(TaskAttemptID taskId, String message)
+        throws IOException { }
+
+    @Override
+    public void fsError(TaskAttemptID taskId, String message)
+        throws IOException { }
+
+    @Override
+    public MapTaskCompletionEventsUpdate getMapCompletionEvents(JobID jobId,
+        int fromIndex, int maxLocs, TaskAttemptID id) throws IOException {
+      return null;
+    }
+
+    @Override
+    public JvmTask getTask(JvmContext context) throws IOException {
+      return null;
+    }
+
+    @Override
+    public boolean ping(TaskAttemptID taskid) throws IOException {
+      return true;
+    }
+
+    @Override
+    public void reportDiagnosticInfo(TaskAttemptID taskid, String trace)
+        throws IOException {
+    }
+
+    @Override
+    public void reportNextRecordRange(TaskAttemptID taskid, Range range)
+        throws IOException {
+    }
+
+    @Override
+    public void shuffleError(TaskAttemptID taskId, String message)
+        throws IOException {
+    }
+
+    @Override
+    public boolean statusUpdate(TaskAttemptID taskId, TaskStatus taskStatus)
+        throws IOException, InterruptedException {
+      return true;
+    }
+
+    @Override
+    public long getProtocolVersion(String protocol, long clientVersion)
+        throws IOException {
+      return 0;
+    }
+
+    @Override
+    public void 
+    updatePrivateDistributedCacheSizes(org.apache.hadoop.mapreduce.JobID jobId,
+                                       long[] sizes) throws IOException {
+      // NOTHING
+    }
+  }
+  
+  /**
+   * A test that mimics a failed task to ensure that it does
+   * not get into the COMMIT_PENDING state, by using a fake
+   * UmbilicalProtocol implementation that fails if the commit
+   * protocol is played.
+   * 
+   * The test mocks the various steps in a failed task's 
+   * life-cycle using a special OutputCommitter and UmbilicalProtocol
+   * implementation.
+   * 
+   * @throws Exception
+   */
+  public void testTaskCleanupDoesNotCommit() throws Exception {
+    // Mimic a job with a special committer that does not cleanup
+    // files when a task fails.
+    JobConf job = new JobConf();
+    job.setOutputCommitter(CommitterWithoutCleanup.class);
+    Path outDir = new Path(rootDir, "output"); 
+    FileOutputFormat.setOutputPath(job, outDir);
+
+    // Mimic job setup
+    String dummyAttemptID = "attempt_200707121733_0001_m_000000_0";
+    TaskAttemptID attemptID = TaskAttemptID.forName(dummyAttemptID);
+    OutputCommitter committer = new CommitterWithoutCleanup();
+    JobContext jContext = new JobContext(job, attemptID.getJobID());
+    committer.setupJob(jContext);
+    
+
+    // Mimic a map task
+    dummyAttemptID = "attempt_200707121733_0001_m_000001_0";
+    attemptID = TaskAttemptID.forName(dummyAttemptID);
+    Task task = new MapTask(new Path(rootDir, "job.xml").toString(), attemptID,
+        0, null, 1);
+    task.setConf(job);
+    task.localizeConfiguration(job);
+    task.initialize(job, attemptID.getJobID(), Reporter.NULL, false);
+    
+    // Mimic the map task writing some output.
+    String file = "test.txt";
+    FileSystem localFs = FileSystem.getLocal(job);
+    TextOutputFormat<Text, Text> theOutputFormat 
+      = new TextOutputFormat<Text, Text>();
+    RecordWriter<Text, Text> theRecordWriter = 
+      theOutputFormat.getRecordWriter(localFs,
+        job, file, Reporter.NULL);
+    theRecordWriter.write(new Text("key"), new Text("value"));
+    theRecordWriter.close(Reporter.NULL);
+
+    // Mimic a task failure; setting up the task for cleanup simulates
+    // the abort protocol to be played.
+    // Without checks in the framework, this will fail
+    // as the committer will cause a COMMIT to happen for
+    // the cleanup task.
+    task.setTaskCleanupTask();
+    MyUmbilical umbilical = new MyUmbilical();
+    task.run(job, umbilical);
+    assertTrue("Task did not succeed", umbilical.taskDone);
+  }
+
+  public void testCommitRequiredForMapTask() throws Exception {
+    Task testTask = createDummyTask(TaskType.MAP);
+    assertTrue("MapTask should need commit", testTask.isCommitRequired());
+  }
+
+  public void testCommitRequiredForReduceTask() throws Exception {
+    Task testTask = createDummyTask(TaskType.REDUCE);
+    assertTrue("ReduceTask should need commit", testTask.isCommitRequired());
+  }
+  
+  public void testCommitNotRequiredForJobSetup() throws Exception {
+    Task testTask = createDummyTask(TaskType.MAP);
+    testTask.setJobSetupTask();
+    assertFalse("Job setup task should not need commit", 
+        testTask.isCommitRequired());
+  }
+  
+  public void testCommitNotRequiredForJobCleanup() throws Exception {
+    Task testTask = createDummyTask(TaskType.MAP);
+    testTask.setJobCleanupTask();
+    assertFalse("Job cleanup task should not need commit", 
+        testTask.isCommitRequired());
+  }
+
+  public void testCommitNotRequiredForTaskCleanup() throws Exception {
+    Task testTask = createDummyTask(TaskType.REDUCE);
+    testTask.setTaskCleanupTask();
+    assertFalse("Task cleanup task should not need commit", 
+        testTask.isCommitRequired());
+  }
+
+  private Task createDummyTask(TaskType type) 
+      throws IOException, ClassNotFoundException, InterruptedException {
+    JobConf conf = new JobConf();
+    conf.setOutputCommitter(CommitterThatAlwaysRequiresCommit.class);
+    Path outDir = new Path(rootDir, "output"); 
+    FileOutputFormat.setOutputPath(conf, outDir);
+    JobID jobId = JobID.forName("job_201002121132_0001");
+    Task testTask;
+    if (type == TaskType.MAP) {
+      testTask = new MapTask();
+    } else {
+      testTask = new ReduceTask();
+    }
+    testTask.setConf(conf);
+    testTask.initialize(conf, jobId, Reporter.NULL, false);
+    return testTask;
+  }
+
+  public static void main(String[] argv) throws Exception {
+    TestTaskCommit td = new TestTaskCommit();
+    td.testCommitFail();
+  }
+}
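
The invariant this new test pins down: only real map and reduce attempts take part in the commit protocol; setup and cleanup attempts go straight to done(). A loose sketch of that gate (not the real Task.run control flow, which polls canCommit before entering COMMIT_PENDING):

    if (!task.isCommitRequired()) {
      // job-setup, job-cleanup, and task-cleanup attempts skip commit
      umbilical.done(task.getTaskID());
    } else {
      // a normal attempt would poll umbilical.canCommit(...) and then
      // report umbilical.commitPending(...) before finishing
    }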

+ 13 - 4
src/test/org/apache/hadoop/mapred/TestTaskEnvironment.java

@@ -26,11 +26,15 @@ import java.util.Vector;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.filecache.TrackerDistributedCacheManager;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.mapred.JvmManager.JvmManagerForType;
 import org.apache.hadoop.mapred.JvmManager.JvmManagerForType.JvmRunner;
 import org.apache.hadoop.mapred.JvmManager.JvmEnv;
+import org.apache.hadoop.mapred.TaskTracker.RunningJob;
 import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.server.tasktracker.userlogs.UserLogManager;
@@ -98,7 +102,14 @@ public class TestTaskEnvironment {
     Task task = new MapTask(null, attemptID, 0, null, MAP_SLOTS);
     task.setConf(taskConf);
     TaskInProgress tip = tt.new TaskInProgress(task, taskConf);
-    final TaskRunner taskRunner = task.createRunner(tt, tip);
+    RunningJob rjob = new RunningJob(attemptID.getJobID());
+    TaskController taskController = new DefaultTaskController();
+    taskController.setConf(ttConf);
+    rjob.distCacheMgr = 
+      new TrackerDistributedCacheManager(ttConf, taskController).
+      newTaskDistributedCacheManager(attemptID.getJobID(), taskConf);
+      
+    final TaskRunner taskRunner = task.createRunner(tt, tip, rjob);
     String errorInfo = "Child error";
     String mapredChildEnv = taskRunner.getChildEnv(taskConf);
     taskRunner.updateUserLoginEnv(errorInfo, user, taskConf, env);
@@ -111,9 +122,7 @@ public class TestTaskEnvironment {
     workDir.mkdir();
     final File stdout = new File(TEST_DIR, "stdout");
     final File stderr = new File(TEST_DIR, "stderr");
-    JvmEnv jenv = jvmManager.constructJvmEnv(null, vargs,
-      stdout,stderr, 100, workDir, env, taskConf);
-    Map<String, String> jvmenvmap = jenv.env;
+    Map<String, String> jvmenvmap = env;
     String javaOpts = taskRunner.getChildJavaOpts(ttConf, 
       JobConf.MAPRED_MAP_TASK_JAVA_OPTS);
 

+ 15 - 19
src/test/org/apache/hadoop/mapred/TestTaskTrackerLocalization.java

@@ -27,18 +27,18 @@ import java.util.jar.JarOutputStream;
 import java.util.zip.ZipEntry;
 
 import junit.framework.TestCase;
+import org.junit.Ignore;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.filecache.TrackerDistributedCacheManager;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JvmManager.JvmEnv;
 import org.apache.hadoop.mapred.QueueManager.QueueACL;
-import org.apache.hadoop.mapred.TaskController.JobInitializationContext;
-import org.apache.hadoop.mapred.TaskController.TaskControllerContext;
 import org.apache.hadoop.mapred.TaskTracker.RunningJob;
 import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
 import org.apache.hadoop.mapred.UtilsForTests.InlineCleanupQueue;
@@ -54,6 +54,7 @@ import org.apache.hadoop.util.Shell;
  * TaskTracker.
  * 
  */
+@Ignore // test relies on deprecated functionality/lifecycle
 public class TestTaskTrackerLocalization extends TestCase {
 
   private File TEST_ROOT_DIR;
@@ -186,10 +187,9 @@ public class TestTaskTrackerLocalization extends TestCase {
     // setup task controller
     taskController = getTaskController();
     taskController.setConf(trackerFConf);
-    taskController.setup();
+    taskController.setup(lDirAlloc);
     tracker.setTaskController(taskController);
-    tracker.setLocalizer(new Localizer(tracker.getLocalFileSystem(), localDirs,
-                                       taskController));
+    tracker.setLocalizer(
+        new Localizer(tracker.getLocalFileSystem(), localDirs));
   }
 
   protected TaskController getTaskController() {
@@ -617,7 +617,7 @@ public class TestTaskTrackerLocalization extends TestCase {
     InlineCleanupQueue cleanupQueue = new InlineCleanupQueue();
     tracker.setCleanupThread(cleanupQueue);
 
-    tip.removeTaskFiles(needCleanup, taskId);
+    tip.removeTaskFiles(needCleanup);
 
     if (jvmReuse) {
       // work dir should still exist and cleanup queue should be empty
@@ -708,13 +708,20 @@ public class TestTaskTrackerLocalization extends TestCase {
         + " is not created in any of the configured dirs!!",
         attemptWorkDir != null);
 
-    TaskRunner runner = task.createRunner(tracker, tip);
+    RunningJob rjob = new RunningJob(jobId);
+    TaskController taskController = new DefaultTaskController();
+    taskController.setConf(trackerFConf);
+    rjob.distCacheMgr = 
+      new TrackerDistributedCacheManager(trackerFConf, taskController).
+      newTaskDistributedCacheManager(jobId, trackerFConf);
+      
+    TaskRunner runner = task.createRunner(tracker, tip, rjob);
     tip.setTaskRunner(runner);
 
     // /////// Few more methods being tested
     runner.setupChildTaskConfiguration(lDirAlloc);
     TaskRunner.createChildTmpDir(new File(attemptWorkDir.toUri().getPath()),
-        localizedJobConf);
+        localizedJobConf, true);
     attemptLogFiles = runner.prepareLogFiles(task.getTaskID(),
         task.isTaskCleanupTask());
 
@@ -731,17 +738,6 @@ public class TestTaskTrackerLocalization extends TestCase {
     localizedTaskConf = new JobConf(localTaskFile);
     TaskRunner.setupChildMapredLocalDirs(task, localizedTaskConf);
     // ///////
-
-    // Initialize task via TaskController
-    TaskControllerContext taskContext =
-        new TaskController.TaskControllerContext();
-    taskContext.env =
-        new JvmEnv(null, null, null, null, -1, new File(localizedJobConf
-            .get(TaskTracker.JOB_LOCAL_DIR)), null, localizedJobConf);
-    taskContext.task = task;
-    // /////////// The method being tested
-    taskController.initializeTask(taskContext);
-    // ///////////
   }
 
   /**
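
Localization setup no longer routes through TaskController.initializeTask; the Localizer is built from just a FileSystem and the local dirs, and the controller receives the allocator instead. A sketch, using only constructors visible in this diff:

    taskController.setConf(trackerFConf);
    taskController.setup(lDirAlloc);
    tracker.setTaskController(taskController);
    tracker.setLocalizer(
        new Localizer(tracker.getLocalFileSystem(), localDirs));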

+ 2 - 1
src/test/org/apache/hadoop/mapred/TestTrackerDistributedCacheManagerWithLinuxTaskController.java

@@ -25,6 +25,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.LocalDirAllocator;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.ClusterWithLinuxTaskController.MyLinuxTaskController;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -64,7 +65,7 @@ public class TestTrackerDistributedCacheManagerWithLinuxTaskController extends
     String execPath = path + "/task-controller";
     ((MyLinuxTaskController)taskController).setTaskControllerExe(execPath);
     taskController.setConf(conf);
-    taskController.setup();
+    taskController.setup(new LocalDirAllocator("mapred.local.dir"));
   }
 
   @Override

+ 6 - 3
src/test/org/apache/hadoop/mapred/TestUserLogCleanup.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.mapred.UtilsForTests.FakeClock;
 import org.apache.hadoop.mapreduce.JobContext;
 import org.apache.hadoop.mapreduce.server.tasktracker.Localizer;
 import org.apache.hadoop.mapreduce.server.tasktracker.userlogs.*;
+import org.apache.hadoop.security.UserGroupInformation;
 
 import static org.junit.Assert.*;
 
@@ -59,10 +60,13 @@ public class TestUserLogCleanup {
   }
 
   private File localizeJob(JobID jobid) throws IOException {
+    String user = UserGroupInformation.getCurrentUser().getShortUserName();
+    new JobLocalizer(tt.getJobConf(), user, 
+                     jobid.toString()).initializeJobLogDir();
     File jobUserlog = TaskLog.getJobDir(jobid);
     JobConf conf = new JobConf();
     // localize job log directory
-    tt.initializeJobLogDir(jobid, conf);
+    tt.saveLogDir(jobid, conf);
     assertTrue(jobUserlog + " directory is not created.", jobUserlog.exists());
     return jobUserlog;
   }
@@ -78,8 +82,7 @@ public class TestUserLogCleanup {
     tt = new TaskTracker();
     tt.setConf(new JobConf(conf));
     localizer = new Localizer(FileSystem.get(conf), conf
-        .getStrings(JobConf.MAPRED_LOCAL_DIR_PROPERTY),
-        new DefaultTaskController());
+        .getStrings(JobConf.MAPRED_LOCAL_DIR_PROPERTY));
     tt.setLocalizer(localizer);
     userLogManager = new UtilsForTests.InLineUserLogManager(conf);
     userLogCleaner = userLogManager.getUserLogCleaner();
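
Job log directories are now created by JobLocalizer, running as the user, and the tracker merely records the directory afterwards (saveLogDir replaces initializeJobLogDir). Sketch, assuming the three-argument constructor shown above:

    String user = UserGroupInformation.getCurrentUser().getShortUserName();
    new JobLocalizer(tt.getJobConf(), user, jobid.toString())
        .initializeJobLogDir();   // create the per-job log dir as the user
    tt.saveLogDir(jobid, conf);   // the tracker just remembers where it is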

+ 14 - 1
src/test/org/apache/hadoop/mapred/UtilsForTests.java

@@ -47,11 +47,13 @@ import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.WritableComparable;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext;
 import org.apache.hadoop.mapred.SortValidator.RecordStatsChecker.NonSplitableSequenceFileInputFormat;
 import org.apache.hadoop.mapred.lib.IdentityMapper;
 import org.apache.hadoop.mapred.lib.IdentityReducer;
 import org.apache.hadoop.mapreduce.server.tasktracker.userlogs.UserLogEvent;
 import org.apache.hadoop.mapreduce.server.tasktracker.userlogs.UserLogManager;
+import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 
 /** 
@@ -439,7 +441,7 @@ public class UtilsForTests {
    * asynchronously.
    */
   public static class InlineCleanupQueue extends CleanupQueue {
-    List<String> stalePaths = new ArrayList<String>();
+    List<Path> stalePaths = new ArrayList<Path>();
 
     public InlineCleanupQueue() {
       // do nothing
@@ -462,6 +464,17 @@ public class UtilsForTests {
         }
       }
     }
+    static boolean deletePath(PathDeletionContext context) throws IOException {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Trying to delete " + context.fullPath);
+      }
+//      FileSystem fs = context.fullPath.getFileSystem(context.conf);
+//      if (fs.exists(context.fullPath)) {
+//        return fs.delete(context.fullPath, true);
+//      }
+      context.deletePath();
+      return true;
+    }
   }
 
   static String getTaskSignalParameter(boolean isMap) {
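
CleanupQueue now traffics in PathDeletionContext objects rather than raw path strings, and the context itself knows how to delete its path (possibly as the task's user). Tests swap in the inline queue so deletions happen synchronously and assertions can run immediately afterwards:

    // Sketch: deletions complete before the next assertion runs.
    CleanupQueue cleanupQueue = new InlineCleanupQueue();
    tracker.setCleanupThread(cleanupQueue);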

+ 7 - 3
src/test/org/apache/hadoop/util/TestProcfsBasedProcessTree.java

@@ -31,6 +31,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.util.ProcessTree.Signal;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Shell.ExitCodeException;
 import org.apache.hadoop.mapred.UtilsForTests;
@@ -147,8 +148,7 @@ public class TestProcfsBasedProcessTree extends TestCase {
     String pid = getRogueTaskPID();
     LOG.info("Root process pid: " + pid);
     ProcfsBasedProcessTree p = new ProcfsBasedProcessTree(pid,
-        ProcessTree.isSetsidAvailable,
-        ProcessTree.DEFAULT_SLEEPTIME_BEFORE_SIGKILL);
+        ProcessTree.isSetsidAvailable);
     p = p.getProcessTree(); // initialize
     LOG.info("ProcessTree: " + p.toString());
     File leaf = new File(lowestDescendant);
@@ -168,7 +168,11 @@ public class TestProcfsBasedProcessTree extends TestCase {
     String processTreeDump = p.getProcessTreeDump();
 
     // destroy the map task and all its subprocesses
-    p.destroy(true/*in the background*/);
+    if (ProcessTree.isSetsidAvailable) {
+      ProcessTree.killProcessGroup(pid, Signal.KILL);
+    } else {
+      ProcessTree.killProcess(pid, Signal.KILL);
+    }
     if(ProcessTree.isSetsidAvailable) {// whole processtree should be gone
       assertEquals(false, p.isAnyProcessInTreeAlive());
     }