
commit 67e4039329625e1af13bcdad5407039e45759207
Author: Konstantin Boudnik <cos@goodenter-lm.local>
Date: Mon May 3 18:58:34 2010 -0700

HADOOP-6332 from https://issues.apache.org/jira/secure/attachment/12443539/6332-phase2.patch

+++ b/YAHOO-CHANGES.txt
+ HADOOP-6332. Large-scale Automated Test Framework (sharad, Sreekanth
+ Ramakrishnan, at all via cos)
+


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security-patches@1077437 13f79535-47bb-0310-9956-ffa450edef68

Owen O'Malley 14 years ago
parent
current commit
f74d8491a5
50 changed files with 9506 additions and 232 deletions
  1. build.xml (+8 -4)
  2. src/test/aop/build/aop.xml (+43 -16)
  3. src/test/org/apache/hadoop/mapred/UtilsForTests.java (+35 -0)
  4. src/test/system/aop/org/apache/hadoop/hdfs/HDFSPolicyProviderAspect.aj (+63 -0)
  5. src/test/system/aop/org/apache/hadoop/hdfs/server/datanode/DataNodeAspect.aj (+67 -0)
  6. src/test/system/aop/org/apache/hadoop/hdfs/server/namenode/NameNodeAspect.aj (+77 -0)
  7. src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj (+8 -0)
  8. src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj (+58 -0)
  9. src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj (+3 -2)
  10. src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj (+40 -4)
  11. src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj (+24 -6)
  12. src/test/system/c++/runAs/Makefile.in (+41 -0)
  13. src/test/system/c++/runAs/configure (+5104 -0)
  14. src/test/system/c++/runAs/configure.ac (+65 -0)
  15. src/test/system/c++/runAs/main.c (+59 -0)
  16. src/test/system/c++/runAs/runAs.c (+111 -0)
  17. src/test/system/c++/runAs/runAs.h.in (+59 -0)
  18. src/test/system/conf/hadoop-policy-system-test.xml (+52 -0)
  19. src/test/system/conf/system-test.xml (+128 -0)
  20. src/test/system/java/org/apache/hadoop/hdfs/TestHL040.java (+69 -0)
  21. src/test/system/java/org/apache/hadoop/hdfs/test/system/DNClient.java (+82 -0)
  22. src/test/system/java/org/apache/hadoop/hdfs/test/system/DNProtocol.java (+36 -0)
  23. src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSCluster.java (+149 -0)
  24. src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java (+43 -0)
  25. src/test/system/java/org/apache/hadoop/hdfs/test/system/NNClient.java (+71 -0)
  26. src/test/system/java/org/apache/hadoop/hdfs/test/system/NNProtocol.java (+36 -0)
  27. src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java (+18 -5)
  28. src/test/system/java/org/apache/hadoop/mapred/TestCluster.java (+153 -14)
  29. src/test/system/java/org/apache/hadoop/mapred/TestControlledJob.java (+15 -4)
  30. src/test/system/java/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java (+342 -0)
  31. src/test/system/java/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java (+281 -0)
  32. src/test/system/java/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java (+300 -0)
  33. src/test/system/java/org/apache/hadoop/mapred/TestFileOwner.java (+216 -0)
  34. src/test/system/java/org/apache/hadoop/mapred/TestJobKill.java (+185 -0)
  35. src/test/system/java/org/apache/hadoop/mapred/TestPushConfig.java (+145 -0)
  36. src/test/system/java/org/apache/hadoop/mapred/TestTaskKilling.java (+625 -0)
  37. src/test/system/java/org/apache/hadoop/mapred/TestTaskOwner.java (+9 -26)
  38. src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java (+17 -5)
  39. src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java (+33 -19)
  40. src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java (+28 -8)
  41. src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java (+8 -1)
  42. src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java (+37 -30)
  43. src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java (+31 -7)
  44. src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java (+25 -15)
  45. src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java (+31 -0)
  46. src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java (+190 -63)
  47. src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java (+96 -0)
  48. src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java (+23 -3)
  49. src/test/system/scripts/pushConfig.sh (+48 -0)
  50. src/test/testjar/JobKillCommitter.java (+119 -0)

+ 8 - 4
build.xml

@@ -864,8 +864,6 @@
     <attribute name="classpath" />
     <attribute name="test.dir" />
     <attribute name="fileset.dir" />
-    <attribute name="hadoop.home" default="" />
-    <attribute name="hadoop.conf.dir" default="" />
     <attribute name="hadoop.conf.dir.deployed" default="" />
     <attribute name="test.krb5.conf" default="" />
     <attribute name="test.krb5.conf.filename" default="" />
@@ -904,8 +902,14 @@
                      value="@{test.krb5.conf.filename}"/>
         <sysproperty key="hadoop.policy.file" value="hadoop-policy.xml" />
         <sysproperty key="java.library.path"
-          value="${build.native}/lib:${lib.dir}/native/${build.platform}"/>
-        <sysproperty key="install.c++.examples" value="${install.c++.examples}"/>
+             value="${build.native}/lib:${lib.dir}/native/${build.platform}"/>
+        <sysproperty key="install.c++.examples"
+                     value="${install.c++.examples}" />
+        <sysproperty key="testjar"
+                     value="@{test.dir}/testjar" />
+        <!-- System properties that are specifically set for system tests -->
+        <sysproperty key="test.system.hdrc.deployed.hadoopconfdir"
+                     value="@{hadoop.conf.dir.deployed}" />
         <!-- set io.compression.codec.lzo.class in the child jvm only if it is set -->
         <syspropertyset dynamic="no">
           <propertyref name="io.compression.codec.lzo.class"/>
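
Note: the two system properties added in this hunk ("testjar" and "test.system.hdrc.deployed.hadoopconfdir") are how the build hands deployment details to the forked test JVM. A minimal Java sketch of how a system test could pick them up; the helper class below is hypothetical and only illustrates the property names taken from the hunk above:

    // Hypothetical helper: reads the properties the <junit> task above passes
    // into the forked test JVM via <sysproperty>.
    public final class SystemTestProps {
      // Directory holding the deployed Hadoop configuration on the cluster,
      // set from @{hadoop.conf.dir.deployed}.
      public static String deployedConfDir() {
        return System.getProperty("test.system.hdrc.deployed.hadoopconfdir", "");
      }
      // Directory holding the jars built from src/test/testjar.
      public static String testJarDir() {
        return System.getProperty("testjar", "");
      }
    }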

+ 43 - 16
src/test/aop/build/aop.xml

@@ -88,7 +88,6 @@
   <!-- Classpath for running system tests -->
   <path id="test.system.classpath">
         <pathelement location="${hadoop.conf.dir.deployed}" />
-        <pathelement location="${hadoop.conf.dir}" />
         <pathelement location="${system-test-build-dir}/test/extraconf" />
         <pathelement location="${system-test-build-dir}/test/classes" />
         <pathelement location="${system-test-build-dir}/classes" />
@@ -114,16 +113,11 @@
   <!-- ================ -->
   <!-- run system tests -->
   <!-- ================ -->
-  <target name="test-system" depends="-test-system-deployed, -test-system-local"
+  <target name="test-system" depends="ivy-retrieve-common"
     description="Run system tests">
-  </target>
-
-  <target name="-test-system-local"
-    depends="ivy-retrieve-common, prepare-test-system" 
-    unless="hadoop.conf.dir.deployed">
-    <macro-jar-examples
-      build.dir="${system-test-build-dir}"
-      basedir="${system-test-build-dir}/examples">
+    <subant buildpath="build.xml" target="jar-test-system"/>
+    <macro-jar-examples build.dir="${system-test-build-dir}"
+                        basedir="${system-test-build-dir}/examples">
     </macro-jar-examples>
     <macro-test-runner test.file="${test.all.tests.file}"
                        classpath="test.system.classpath"
@@ -149,12 +143,6 @@
     </macro-test-runner>
   </target>
 
-  <target name="prepare-test-system" depends="jar-test-system">
-    <subant buildpath="build.xml" target="inject-system-faults">
-      <property name="build.dir" value="${system-test-build-dir}" />
-    </subant>
-  </target>
-
   <target name="injectfaults"
           description="Instrument classes with faults and other AOP advices">
     <mkdir dir="${build-fi.dir}"/>
@@ -259,4 +247,43 @@
   </macrodef>
 
   <!--End of Fault Injection (FI) related session-->
+
+  <!-- Start of cluster controller binary target -->
+  <property name="runAs.src" 
+    value ="${test.src.dir}/system/c++/runAs"/>
+  <property name="runAs.build.dir" 
+    value="${system-test-build-dir}/c++-build"/>
+  <property name="runAs.configure.script" 
+    value="${runAs.build.dir}/configure"/>
+  <target name="init-runAs-build">
+    <condition property="runAs.parameters.passed">
+      <not>
+        <equals arg1="${run-as.hadoop.home.dir}" 
+          arg2="$${run-as.hadoop.home.dir}"/>
+      </not>
+    </condition>
+    <fail unless="runAs.parameters.passed" 
+          message="Required parameters run-as.hadoop.home.dir not passed to the build"/>
+    <mkdir dir="${runAs.build.dir}"/>
+    <copy todir="${runAs.build.dir}" overwrite="true">
+      <fileset dir="${runAs.src}" includes="**/*"/>
+    </copy>
+    <chmod perm="+x" file="${runAs.configure.script}">
+    </chmod>
+  </target>
+
+  <target name="configure-runAs" 
+    depends="init-runAs-build">
+    <exec executable="${runAs.configure.script}" 
+      dir="${runAs.build.dir}" failonerror="true">
+      <arg value="--with-home=${run-as.hadoop.home.dir}"/>
+    </exec>
+  </target>
+  <target name="run-as" depends="configure-runAs">
+    <exec executable="${make.cmd}" dir="${runAs.build.dir}" 
+        searchpath="yes" failonerror="yes">
+     <arg value="all" />
+    </exec>
+  </target>
+  <!-- End of cluster controller binary target -->
 </project>
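
Note on the new runAs targets above: the init-runAs-build target detects a missing property with the standard Ant idiom of comparing ${run-as.hadoop.home.dir} against the escaped literal $${run-as.hadoop.home.dir}; if the property was never set, both expand to the same string and the build fails with the "Required parameters run-as.hadoop.home.dir not passed" message. Under the assumption of a normal Ant setup, the cluster controller binary would therefore be built with an invocation along the lines of: ant run-as -Drun-as.hadoop.home.dir=<path to the deployed Hadoop home>.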

+ 35 - 0
src/test/org/apache/hadoop/mapred/UtilsForTests.java

@@ -34,6 +34,7 @@ import org.apache.commons.logging.Log;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -815,5 +816,39 @@ public class UtilsForTests {
       throw new RuntimeException("Could not start jt", e);
     }
   }
+
+  /**
+   * Creates a file at the given DFS path, writes the given input to it,
+   * and closes it.
+   * @param dfs FileSystem the file system in which the file is created
+   * @param URIPATH Path the path at which the file is created
+   * @param permission FsPermission the permission to set on the created file
+   * @param input String the content written into the file
+   * @return the (already closed) DataOutputStream used to write the file
+   */
+  public static DataOutputStream
+      createTmpFileDFS(FileSystem dfs, Path URIPATH,
+      FsPermission permission, String input) throws Exception {
+    //Creating the path with the file
+    DataOutputStream file =
+      FileSystem.create(dfs, URIPATH, permission);
+    file.writeBytes(input);
+    file.close();
+    return file;
+  }
+
+  /**
+   * This formats the long tasktracker name to just the FQDN
+   * @param taskTrackerLong String The long format of the tasktracker string
+   * @return String The FQDN of the tasktracker
+   * @throws Exception
+   */
+  public static String getFQDNofTT (String taskTrackerLong) throws Exception {
+    //Getting the exact FQDN of the tasktracker from the tasktracker string.
+    String[] firstSplit = taskTrackerLong.split("_");
+    String tmpOutput = firstSplit[1];
+    String[] secondSplit = tmpOutput.split(":");
+    String tmpTaskTracker = secondSplit[0];
+    return tmpTaskTracker;
+  }
+
 }
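
The new getFQDNofTT() helper above strips the "tracker_" prefix and the port information from the long tasktracker name. A hedged usage sketch; the sample tracker name and port below are made up for illustration:

    // Assuming a tracker name of the usual "tracker_<fqdn>:<bind address>:<port>"
    // shape, getFQDNofTT() keeps only the host part.
    String trackerLong = "tracker_host1.example.com:localhost/127.0.0.1:50060";
    String fqdn = UtilsForTests.getFQDNofTT(trackerLong);
    // fqdn is now "host1.example.com"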
 

+ 63 - 0
src/test/system/aop/org/apache/hadoop/hdfs/HDFSPolicyProviderAspect.aj

@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.util.ArrayList;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.test.system.DaemonProtocol;
+import org.apache.hadoop.hdfs.test.system.DNProtocol;
+import org.apache.hadoop.hdfs.test.system.NNProtocol;
+import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
+
+/**
+ * This aspect adds two HDFS Herriot specific protocols to the list of 'authorized'
+ * Herriot protocols.
+ * Protocol descriptors i.e. 'security.nn.protocol.acl' have to be added to
+ * <code>hadoop-policy.xml</code> if present
+ */
+public privileged aspect HDFSPolicyProviderAspect {
+  private static final Log LOG = LogFactory
+      .getLog(HDFSPolicyProviderAspect.class);
+
+  ArrayList<Service> herriotHDFSServices = null;
+
+  pointcut updateHDFSServices() :
+    execution (public Service[] HDFSPolicyProvider.getServices());
+
+  Service[] around() : updateHDFSServices () {
+    herriotHDFSServices = new ArrayList<Service>();
+    for (Service s : HDFSPolicyProvider.hdfsServices) {
+      LOG.debug("Copying configured protocol to "
+          + s.getProtocol().getCanonicalName());
+      herriotHDFSServices.add(s);
+    }
+    herriotHDFSServices.add(new Service("security.daemon.protocol.acl",
+        DaemonProtocol.class));
+    herriotHDFSServices.add(new Service("security.nn.protocol.acl",
+        NNProtocol.class));
+    herriotHDFSServices.add(new Service("security.dn.protocol.acl",
+        DNProtocol.class));
+    final Service[] retArray = herriotHDFSServices
+        .toArray(new Service[herriotHDFSServices.size()]);
+    LOG.debug("Number of configured protocols to return: " + retArray.length);
+    return retArray;
+  }
+}

+ 67 - 0
src/test/system/aop/org/apache/hadoop/hdfs/server/datanode/DataNodeAspect.aj

@@ -0,0 +1,67 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.AbstractList;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.test.system.DNProtocol;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.system.DaemonProtocol;
+
+public privileged aspect DataNodeAspect {
+  declare parents : DataNode implements DNProtocol;
+
+  public Configuration DataNode.getDaemonConf() {
+    return super.getConf();
+  }
+
+  pointcut dnConstructorPointcut(Configuration conf, AbstractList<File> dirs) :
+    call(DataNode.new(Configuration, AbstractList<File>))
+    && args(conf, dirs);
+
+  after(Configuration conf, AbstractList<File> dirs) returning (DataNode datanode):
+    dnConstructorPointcut(conf, dirs) {
+    try {
+      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+      datanode.setUser(ugi.getShortUserName());
+    } catch (IOException e) {
+      datanode.LOG.warn("Unable to get the user information for the " +
+          "DataNode");
+    }
+    datanode.setReady(true);
+  }
+
+  pointcut getVersionAspect(String protocol, long clientVersion) :
+    execution(public long DataNode.getProtocolVersion(String ,
+      long) throws IOException) && args(protocol, clientVersion);
+
+  long around(String protocol, long clientVersion) :
+    getVersionAspect(protocol, clientVersion) {
+    if(protocol.equals(DaemonProtocol.class.getName())) {
+      return DaemonProtocol.versionID;
+    } else if(protocol.equals(DNProtocol.class.getName())) {
+      return DNProtocol.versionID;
+    } else {
+      return proceed(protocol, clientVersion);
+    }
+  }
+}
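
The getProtocolVersion() advice above is what lets a Herriot client negotiate the DNProtocol version over plain Hadoop RPC. A minimal client-side sketch mirroring the RPC.getProxy() call used in TaskAspect later in this change; the address, port, and configuration below are placeholders, and the fragment assumes the usual imports (java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration, org.apache.hadoop.ipc.RPC, org.apache.hadoop.hdfs.test.system.DNProtocol):

    // Hedged sketch: obtain a DNProtocol proxy to a DataNode whose
    // getProtocolVersion() is answered by the around() advice above.
    // RPC.getProxy() throws IOException.
    InetSocketAddress addr = new InetSocketAddress("datanode.example.com", 8021);
    Configuration conf = new Configuration();
    DNProtocol proxy =
        (DNProtocol) RPC.getProxy(DNProtocol.class, DNProtocol.versionID, addr, conf);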

+ 77 - 0
src/test/system/aop/org/apache/hadoop/hdfs/server/namenode/NameNodeAspect.aj

@@ -0,0 +1,77 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.test.system.NNProtocol;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.system.DaemonProtocol;
+
+public privileged aspect NameNodeAspect {
+  declare parents : NameNode implements NNProtocol;
+
+  // NameNode doesn't store a copy of its configuration
+  // because it can be changed through the life cycle of the object.
+  // So an exposed reference needs to be added and updated after
+  // new NameNode(Configuration conf) is complete
+  Configuration NameNode.configRef = null;
+
+  // Method simply assigns a reference to the NameNode configuration object
+  void NameNode.setRef (Configuration conf) {
+    if (configRef == null)
+      configRef = conf;
+  }
+
+  public Configuration NameNode.getDaemonConf() {
+    return configRef;
+  }
+
+  pointcut nnConstructorPointcut(Configuration conf) :
+    call(NameNode.new(Configuration)) && args(conf);
+
+  after(Configuration conf) returning (NameNode namenode):
+    nnConstructorPointcut(conf) {
+    try {
+      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+      namenode.setUser(ugi.getShortUserName());
+    } catch (IOException e) {
+      namenode.LOG.warn("Unable to get the user information for the " +
+          "NameNode");
+    }
+    namenode.setRef(conf);
+    namenode.setReady(true);
+  }
+
+  pointcut getVersionAspect(String protocol, long clientVersion) :
+    execution(public long NameNode.getProtocolVersion(String ,
+      long) throws IOException) && args(protocol, clientVersion);
+
+  long around(String protocol, long clientVersion) :
+    getVersionAspect(protocol, clientVersion) {
+    if(protocol.equals(DaemonProtocol.class.getName())) {
+      return DaemonProtocol.versionID;
+    } else if(protocol.equals(NNProtocol.class.getName())) {
+      return NNProtocol.versionID;
+    } else {
+      return proceed(protocol, clientVersion);
+    }
+  }
+}

+ 8 - 0
src/test/system/aop/org/apache/hadoop/mapred/JobTrackerAspect.aj

@@ -33,6 +33,7 @@ import org.apache.hadoop.mapreduce.test.system.JTProtocol;
 import org.apache.hadoop.mapreduce.test.system.JobInfo;
 import org.apache.hadoop.mapreduce.test.system.TTInfo;
 import org.apache.hadoop.mapreduce.test.system.TaskInfo;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.system.DaemonProtocol;
 
 /**
@@ -195,6 +196,13 @@ public privileged aspect JobTrackerAspect {
   after(JobConf conf, String jobtrackerIndentifier) 
     returning (JobTracker tracker): jtConstructorPointCut(conf, 
         jobtrackerIndentifier) {
+    try {
+      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+      tracker.setUser(ugi.getShortUserName());
+    } catch (IOException e) {
+      tracker.LOG.warn("Unable to get the user information for the " +
+      		"Jobtracker");
+    }
     tracker.setReady(true);
   }
   

+ 58 - 0
src/test/system/aop/org/apache/hadoop/mapred/MapReducePolicyProviderAspect.aj

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.util.ArrayList;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.mapreduce.test.system.TTProtocol;
+import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.test.system.DaemonProtocol;
+
+/**
+ * This aspect adds two MR specific Herriot protocols to the list of
+ * 'authorized' Herriot protocols. Protocol descriptors i.e.
+ * 'security.tt.protocol.acl' have to be added to <code>hadoop-policy.xml</code>
+ * if present
+ */
+public privileged aspect MapReducePolicyProviderAspect {
+  private static final Log LOG = LogFactory
+      .getLog(MapReducePolicyProviderAspect.class);
+  ArrayList<Service> herriotMRServices = null;
+
+  pointcut updateMRServices() :
+    execution (public Service[] MapReducePolicyProvider.getServices());
+
+  Service[] around() : updateMRServices () {
+    herriotMRServices = new ArrayList<Service>();
+    for (Service s : MapReducePolicyProvider.mapReduceServices) {
+      LOG.debug("Copying configured protocol to "
+          + s.getProtocol().getCanonicalName());
+      herriotMRServices.add(s);
+    }
+    herriotMRServices.add(new Service("security.daemon.protocol.acl",
+        DaemonProtocol.class));
+    herriotMRServices.add(new Service("security.tt.protocol.acl",
+        TTProtocol.class));
+    final Service[] retArray = herriotMRServices
+        .toArray(new Service[herriotMRServices.size()]);
+    LOG.debug("Number of configured protocols to return: " + retArray.length);
+    return retArray;
+  }
+}

+ 3 - 2
src/test/system/aop/org/apache/hadoop/mapred/TaskAspect.aj

@@ -29,6 +29,7 @@ import org.apache.hadoop.mapred.Task.TaskReporter;
 import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
 import org.apache.hadoop.test.system.ControlAction;
 import org.apache.hadoop.test.system.DaemonProtocol;
+import org.apache.hadoop.mapreduce.test.system.TTProtocol;
 
 public privileged aspect TaskAspect {
 
@@ -106,8 +107,8 @@ public privileged aspect TaskAspect {
   after(Class k, long version, InetSocketAddress addr, Configuration conf) 
     throws IOException : rpcInterceptor(k, version, addr, conf) {
     daemonProxy = 
-      (DaemonProtocol) RPC.getProxy(
-          DaemonProtocol.class, DaemonProtocol.versionID, addr, conf);
+      (TTProtocol) RPC.getProxy(
+          TTProtocol.class, TTProtocol.versionID, addr, conf);
   }
   
 }

+ 40 - 4
src/test/system/aop/org/apache/hadoop/mapred/TaskTrackerAspect.aj

@@ -26,9 +26,12 @@ import org.apache.hadoop.mapreduce.test.system.TTProtocol;
 import org.apache.hadoop.mapreduce.test.system.TTTaskInfo;
 import org.apache.hadoop.mapred.TTTaskInfoImpl.MapTTTaskInfo;
 import org.apache.hadoop.mapred.TTTaskInfoImpl.ReduceTTTaskInfo;
-import org.apache.hadoop.test.system.ControlAction;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.system.DaemonProtocol;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.mapred.TaskTracker.TaskInProgress;
+import org.apache.hadoop.mapreduce.TaskAttemptID;
 
 public privileged aspect TaskTrackerAspect {
 
@@ -36,6 +39,7 @@ public privileged aspect TaskTrackerAspect {
 
   // Add a last sent status field to the Tasktracker class.
   TaskTrackerStatus TaskTracker.lastSentStatus = null;
+  static String TaskTracker.TASKJARDIR = TaskTracker.JARSDIR;
 
   public synchronized TaskTrackerStatus TaskTracker.getStatus()
       throws IOException {
@@ -75,11 +79,11 @@ public privileged aspect TaskTrackerAspect {
     if (tip.task.isMapTask()) {
       info = new MapTTTaskInfo(tip.slotTaken, tip.wasKilled,
           (MapTaskStatus) tip.getStatus(), tip.getJobConf(), tip.getTask()
-              .getUser(), tip.getTask().isTaskCleanupTask());
+              .getUser(), tip.getTask().isTaskCleanupTask(), getPid(tip.getTask().getTaskID()));
     } else {
       info = new ReduceTTTaskInfo(tip.slotTaken, tip.wasKilled,
           (ReduceTaskStatus) tip.getStatus(), tip.getJobConf(), tip.getTask()
-              .getUser(), tip.getTask().isTaskCleanupTask());
+              .getUser(), tip.getTask().isTaskCleanupTask(),getPid(tip.getTask().getTaskID()));
     }
     return info;
   }
@@ -98,6 +102,13 @@ public privileged aspect TaskTrackerAspect {
 
   after(JobConf conf) returning (TaskTracker tracker): 
     ttConstructorPointCut(conf) {
+    try {
+      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+      tracker.setUser(ugi.getShortUserName());
+    } catch (IOException e) {
+      tracker.LOG.warn("Unable to get the user information for the " +
+          "TaskTracker");
+    }
     tracker.setReady(true);
   }
   
@@ -114,6 +125,31 @@ public privileged aspect TaskTrackerAspect {
     } else {
       return proceed(protocol, clientVersion);
     }
-  }
+  }  
 
+  public boolean TaskTracker.isProcessTreeAlive(String pid) throws IOException {
+    // Command to be executed is as follows :
+    // ps -o pid,ppid,sid,command -e | grep -v ps | grep -v grep | grep
+    // "$pid"
+    String checkerCommand =
+        getDaemonConf().get(
+            "test.system.processgroup_checker_command",
+            "ps -o pid,ppid,sid,command -e "
+                + "| grep -v ps | grep -v grep | grep \"$");
+    String[] command =
+        new String[] { "bash", "-c", checkerCommand + pid + "\"" };
+    ShellCommandExecutor shexec = new ShellCommandExecutor(command);
+    try {
+      shexec.execute();
+    } catch (Shell.ExitCodeException e) {
+      TaskTracker.LOG
+          .info("The process tree grep threw a exitcode exception pointing "
+              + "to process tree not being alive.");
+      return false;
+    }
+    TaskTracker.LOG.info("The task grep command is : "
+        + shexec.toString() + " the output from command is : "
+        + shexec.getOutput());
+    return true;
+  }
 }
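
For a concrete pid, the default checker command built by isProcessTreeAlive() above expands as follows. This is only a sketch of the resulting argument array; the pid 1234 is an example value:

    // What ShellCommandExecutor receives for pid "1234" with the default
    // "test.system.processgroup_checker_command":
    String[] command = { "bash", "-c",
        "ps -o pid,ppid,sid,command -e | grep -v ps | grep -v grep | grep \"$1234\"" };
    // A non-zero exit status from this pipeline (Shell.ExitCodeException)
    // is interpreted as "process tree is not alive".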

+ 24 - 6
src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj

@@ -25,7 +25,8 @@ import java.util.List;
 import java.util.ArrayList;
 import java.util.Map;
 import java.util.Properties;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
 import org.apache.hadoop.fs.FileStatus;
@@ -47,7 +48,8 @@ public aspect DaemonProtocolAspect {
   @SuppressWarnings("unchecked")
   private HashMap<Object, List<ControlAction>> DaemonProtocol.actions = 
     new HashMap<Object, List<ControlAction>>();
-  
+  private static final Log LOG = LogFactory.getLog(
+      DaemonProtocolAspect.class.getName());
   /**
    * Set if the daemon process is ready or not, concrete daemon protocol should
    * implement pointcuts to determine when the daemon is ready and use the
@@ -239,20 +241,36 @@ public aspect DaemonProtocolAspect {
     return  logDir+File.separator+daemonLogPattern+"*";
   }
 
-  public int DaemonProtocol.getNumberOfMatchesInLogFile(String pattern)
-      throws IOException {
-    String filePattern = getFilePattern();
+  public int DaemonProtocol.getNumberOfMatchesInLogFile(String pattern,
+      String[] list) throws IOException {
+    StringBuffer filePattern = new StringBuffer(getFilePattern());    
+    if(list != null){
+      for(int i =0; i < list.length; ++i)
+      {
+        filePattern.append(" | grep -v " + list[i] );
+      }
+    }  
     String[] cmd =
         new String[] {
             "bash",
             "-c",
             "grep -c "
                 + pattern + " " + filePattern
-                + " | awk -F: '{s+=$2} END {print s}'" };
+                + " | awk -F: '{s+=$2} END {print s}'" };    
     ShellCommandExecutor shexec = new ShellCommandExecutor(cmd);
     shexec.execute();
     String output = shexec.getOutput();
     return Integer.parseInt(output.replaceAll("\n", "").trim());
   }
+
+  private String DaemonProtocol.user = null;
+  
+  public String DaemonProtocol.getDaemonUser() {
+    return user;
+  }
+  
+  public void DaemonProtocol.setUser(String user) {
+    this.user = user;
+  }
 }
 

+ 41 - 0
src/test/system/c++/runAs/Makefile.in

@@ -0,0 +1,41 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+OBJS=main.o runAs.o
+CC=@CC@
+CFLAGS = @CFLAGS@
+BINARY=runAs
+installdir = @prefix@
+
+all: $(OBJS)
+	$(CC) $(CFLAG) -o $(BINARY) $(OBJS)
+
+main.o: runAs.o main.c
+	$(CC) $(CFLAG) -o main.o -c main.c
+
+runAs.o: runAs.h runAs.c
+	$(CC) $(CFLAG) -o runAs.o -c runAs.c
+
+clean:
+	rm -rf $(BINARY) $(OBJS) $(TESTOBJS)
+
+install: all
+	cp $(BINARY) $(installdir)
+
+uninstall:
+	rm -rf $(installdir)/$(BINARY)
+	rm -rf $(BINARY)

+ 5104 - 0
src/test/system/c++/runAs/configure

@@ -0,0 +1,5104 @@
+#! /bin/sh
+# Guess values for system-dependent variables and create Makefiles.
+# Generated by GNU Autoconf 2.65 for runAs 0.1.
+#
+#
+# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
+# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
+# Inc.
+#
+#
+# This configure script is free software; the Free Software Foundation
+# gives unlimited permission to copy, distribute and modify it.
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '${1+"$@"}'='"$@"'
+  setopt NO_GLOB_SUBST
+else
+  case `(set -o) 2>/dev/null` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='print -r --'
+  as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='printf %s\n'
+  as_echo_n='printf %s'
+else
+  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+    as_echo_n='/usr/ucb/echo -n'
+  else
+    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+    as_echo_n_body='eval
+      arg=$1;
+      case $arg in #(
+      *"$as_nl"*)
+	expr "X$arg" : "X\\(.*\\)$as_nl";
+	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+      esac;
+      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+    '
+    export as_echo_n_body
+    as_echo_n='sh -c $as_echo_n_body as_echo'
+  fi
+  export as_echo_body
+  as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+  PATH_SEPARATOR=:
+  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+      PATH_SEPARATOR=';'
+  }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order.  Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" ""	$as_nl"
+
+# Find who we are.  Look in the path if we contain no directory separator.
+case $0 in #((
+  *[\\/]* ) as_myself=$0 ;;
+  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+  done
+IFS=$as_save_IFS
+
+     ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+  as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+  exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there.  '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+if test "x$CONFIG_SHELL" = x; then
+  as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '\${1+\"\$@\"}'='\"\$@\"'
+  setopt NO_GLOB_SUBST
+else
+  case \`(set -o) 2>/dev/null\` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
+fi
+"
+  as_required="as_fn_return () { (exit \$1); }
+as_fn_success () { as_fn_return 0; }
+as_fn_failure () { as_fn_return 1; }
+as_fn_ret_success () { return 0; }
+as_fn_ret_failure () { return 1; }
+
+exitcode=0
+as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
+
+else
+  exitcode=1; echo positional parameters were not saved.
+fi
+test x\$exitcode = x0 || exit 1"
+  as_suggested="  as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+  as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+  eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+  test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
+test \$(( 1 + 1 )) = 2 || exit 1"
+  if (eval "$as_required") 2>/dev/null; then :
+  as_have_required=yes
+else
+  as_have_required=no
+fi
+  if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
+
+else
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+as_found=false
+for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+  as_found=:
+  case $as_dir in #(
+	 /*)
+	   for as_base in sh bash ksh sh5; do
+	     # Try only shells that exist, to save several forks.
+	     as_shell=$as_dir/$as_base
+	     if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+		    { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
+  CONFIG_SHELL=$as_shell as_have_required=yes
+		   if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
+  break 2
+fi
+fi
+	   done;;
+       esac
+  as_found=false
+done
+$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+	      { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
+  CONFIG_SHELL=$SHELL as_have_required=yes
+fi; }
+IFS=$as_save_IFS
+
+
+      if test "x$CONFIG_SHELL" != x; then :
+  # We cannot yet assume a decent shell, so we have to provide a
+	# neutralization value for shells without unset; and this also
+	# works around shells that cannot unset nonexistent variables.
+	BASH_ENV=/dev/null
+	ENV=/dev/null
+	(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
+	export CONFIG_SHELL
+	exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
+fi
+
+    if test x$as_have_required = xno; then :
+  $as_echo "$0: This script requires a shell more modern than all"
+  $as_echo "$0: the shells that I found on your system."
+  if test x${ZSH_VERSION+set} = xset ; then
+    $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+    $as_echo "$0: be upgraded to zsh 4.3.4 or later."
+  else
+    $as_echo "$0: Please tell bug-autoconf@gnu.org about your system,
+$0: including any error possibly output before this
+$0: message. Then install a modern shell, or manually run
+$0: the script under such a shell if you do have one."
+  fi
+  exit 1
+fi
+fi
+fi
+SHELL=${CONFIG_SHELL-/bin/sh}
+export SHELL
+# Unset more variables known to interfere with behavior of common tools.
+CLICOLOR_FORCE= GREP_OPTIONS=
+unset CLICOLOR_FORCE GREP_OPTIONS
+
+## --------------------- ##
+## M4sh Shell Functions. ##
+## --------------------- ##
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+  { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+  return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+  set +e
+  as_fn_set_status $1
+  exit $1
+} # as_fn_exit
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+  case $as_dir in #(
+  -*) as_dir=./$as_dir;;
+  esac
+  test -d "$as_dir" || eval $as_mkdir_p || {
+    as_dirs=
+    while :; do
+      case $as_dir in #(
+      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+      *) as_qdir=$as_dir;;
+      esac
+      as_dirs="'$as_qdir' $as_dirs"
+      as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_dir" : 'X\(//\)[^/]' \| \
+	 X"$as_dir" : 'X\(//\)$' \| \
+	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+      test -d "$as_dir" && break
+    done
+    test -z "$as_dirs" || eval "mkdir $as_dirs"
+  } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+  eval 'as_fn_append ()
+  {
+    eval $1+=\$2
+  }'
+else
+  as_fn_append ()
+  {
+    eval $1=\$$1\$2
+  }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+  eval 'as_fn_arith ()
+  {
+    as_val=$(( $* ))
+  }'
+else
+  as_fn_arith ()
+  {
+    as_val=`expr "$@" || test $? -eq 1`
+  }
+fi # as_fn_arith
+
+
+# as_fn_error ERROR [LINENO LOG_FD]
+# ---------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with status $?, using 1 if that was 0.
+as_fn_error ()
+{
+  as_status=$?; test $as_status -eq 0 && as_status=1
+  if test "$3"; then
+    as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+    $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
+  fi
+  $as_echo "$as_me: error: $1" >&2
+  as_fn_exit $as_status
+} # as_fn_error
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+   test "X`expr 00001 : '.*\(...\)'`" = X001; then
+  as_expr=expr
+else
+  as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+  as_basename=basename
+else
+  as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+  as_dirname=dirname
+else
+  as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+	 X"$0" : 'X\(//\)$' \| \
+	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+    sed '/^.*\/\([^/][^/]*\)\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+
+  as_lineno_1=$LINENO as_lineno_1a=$LINENO
+  as_lineno_2=$LINENO as_lineno_2a=$LINENO
+  eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
+  test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
+  # Blame Lee E. McMahon (1931-1989) for sed's syntax.  :-)
+  sed -n '
+    p
+    /[$]LINENO/=
+  ' <$as_myself |
+    sed '
+      s/[$]LINENO.*/&-/
+      t lineno
+      b
+      :lineno
+      N
+      :loop
+      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
+      t loop
+      s/-\n.*//
+    ' >$as_me.lineno &&
+  chmod +x "$as_me.lineno" ||
+    { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+
+  # Don't try to exec as it changes $[0], causing all sort of problems
+  # (the dirname of $[0] is not the place where we might find the
+  # original and so on.  Autoconf is especially sensitive to this).
+  . "./$as_me.lineno"
+  # Exit status is that of the last command.
+  exit
+}
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+  case `echo 'xy\c'` in
+  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
+  xy)  ECHO_C='\c';;
+  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
+       ECHO_T='	';;
+  esac;;
+*)
+  ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+  rm -f conf$$.dir/conf$$.file
+else
+  rm -f conf$$.dir
+  mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+  if ln -s conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s='ln -s'
+    # ... but there are two gotchas:
+    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+    # In both cases, we have to default to `cp -p'.
+    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+      as_ln_s='cp -p'
+  elif ln conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s=ln
+  else
+    as_ln_s='cp -p'
+  fi
+else
+  as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+if mkdir -p . 2>/dev/null; then
+  as_mkdir_p='mkdir -p "$as_dir"'
+else
+  test -d ./-p && rmdir ./-p
+  as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+  as_test_x='test -x'
+else
+  if ls -dL / >/dev/null 2>&1; then
+    as_ls_L_option=L
+  else
+    as_ls_L_option=
+  fi
+  as_test_x='
+    eval sh -c '\''
+      if test -d "$1"; then
+	test -d "$1/.";
+      else
+	case $1 in #(
+	-*)set "./$1";;
+	esac;
+	case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
+	???[sx]*):;;*)false;;esac;fi
+    '\'' sh
+  '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+test -n "$DJDIR" || exec 7<&0 </dev/null
+exec 6>&1
+
+# Name of the host.
+# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
+# so uname gets run too.
+ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
+
+#
+# Initializations.
+#
+ac_default_prefix=/usr/local
+ac_clean_files=
+ac_config_libobj_dir=.
+LIBOBJS=
+cross_compiling=no
+subdirs=
+MFLAGS=
+MAKEFLAGS=
+
+# Identity of this package.
+PACKAGE_NAME='runAs'
+PACKAGE_TARNAME='runas'
+PACKAGE_VERSION='0.1'
+PACKAGE_STRING='runAs 0.1'
+PACKAGE_BUGREPORT=''
+PACKAGE_URL=''
+
+ac_default_prefix=.
+ac_unique_file="main.c"
+# Factoring default headers for most tests.
+ac_includes_default="\
+#include <stdio.h>
+#ifdef HAVE_SYS_TYPES_H
+# include <sys/types.h>
+#endif
+#ifdef HAVE_SYS_STAT_H
+# include <sys/stat.h>
+#endif
+#ifdef STDC_HEADERS
+# include <stdlib.h>
+# include <stddef.h>
+#else
+# ifdef HAVE_STDLIB_H
+#  include <stdlib.h>
+# endif
+#endif
+#ifdef HAVE_STRING_H
+# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
+#  include <memory.h>
+# endif
+# include <string.h>
+#endif
+#ifdef HAVE_STRINGS_H
+# include <strings.h>
+#endif
+#ifdef HAVE_INTTYPES_H
+# include <inttypes.h>
+#endif
+#ifdef HAVE_STDINT_H
+# include <stdint.h>
+#endif
+#ifdef HAVE_UNISTD_H
+# include <unistd.h>
+#endif"
+
+ac_subst_vars='SET_MAKE
+LTLIBOBJS
+LIBOBJS
+EGREP
+GREP
+CPP
+OBJEXT
+EXEEXT
+ac_ct_CC
+CPPFLAGS
+LDFLAGS
+CFLAGS
+CC
+target_alias
+host_alias
+build_alias
+LIBS
+ECHO_T
+ECHO_N
+ECHO_C
+DEFS
+mandir
+localedir
+libdir
+psdir
+pdfdir
+dvidir
+htmldir
+infodir
+docdir
+oldincludedir
+includedir
+localstatedir
+sharedstatedir
+sysconfdir
+datadir
+datarootdir
+libexecdir
+sbindir
+bindir
+program_transform_name
+prefix
+exec_prefix
+PACKAGE_URL
+PACKAGE_BUGREPORT
+PACKAGE_STRING
+PACKAGE_VERSION
+PACKAGE_TARNAME
+PACKAGE_NAME
+PATH_SEPARATOR
+SHELL'
+ac_subst_files=''
+ac_user_opts='
+enable_option_checking
+with_home
+'
+      ac_precious_vars='build_alias
+host_alias
+target_alias
+CC
+CFLAGS
+LDFLAGS
+LIBS
+CPPFLAGS
+CPP'
+
+
+# Initialize some variables set by options.
+ac_init_help=
+ac_init_version=false
+ac_unrecognized_opts=
+ac_unrecognized_sep=
+# The variables have the same names as the options, with
+# dashes changed to underlines.
+cache_file=/dev/null
+exec_prefix=NONE
+no_create=
+no_recursion=
+prefix=NONE
+program_prefix=NONE
+program_suffix=NONE
+program_transform_name=s,x,x,
+silent=
+site=
+srcdir=
+verbose=
+x_includes=NONE
+x_libraries=NONE
+
+# Installation directory options.
+# These are left unexpanded so users can "make install exec_prefix=/foo"
+# and all the variables that are supposed to be based on exec_prefix
+# by default will actually change.
+# Use braces instead of parens because sh, perl, etc. also accept them.
+# (The list follows the same order as the GNU Coding Standards.)
+bindir='${exec_prefix}/bin'
+sbindir='${exec_prefix}/sbin'
+libexecdir='${exec_prefix}/libexec'
+datarootdir='${prefix}/share'
+datadir='${datarootdir}'
+sysconfdir='${prefix}/etc'
+sharedstatedir='${prefix}/com'
+localstatedir='${prefix}/var'
+includedir='${prefix}/include'
+oldincludedir='/usr/include'
+docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
+infodir='${datarootdir}/info'
+htmldir='${docdir}'
+dvidir='${docdir}'
+pdfdir='${docdir}'
+psdir='${docdir}'
+libdir='${exec_prefix}/lib'
+localedir='${datarootdir}/locale'
+mandir='${datarootdir}/man'
+
+ac_prev=
+ac_dashdash=
+for ac_option
+do
+  # If the previous option needs an argument, assign it.
+  if test -n "$ac_prev"; then
+    eval $ac_prev=\$ac_option
+    ac_prev=
+    continue
+  fi
+
+  case $ac_option in
+  *=*)	ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
+  *)	ac_optarg=yes ;;
+  esac
+
+  # Accept the important Cygnus configure options, so we can diagnose typos.
+
+  case $ac_dashdash$ac_option in
+  --)
+    ac_dashdash=yes ;;
+
+  -bindir | --bindir | --bindi | --bind | --bin | --bi)
+    ac_prev=bindir ;;
+  -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
+    bindir=$ac_optarg ;;
+
+  -build | --build | --buil | --bui | --bu)
+    ac_prev=build_alias ;;
+  -build=* | --build=* | --buil=* | --bui=* | --bu=*)
+    build_alias=$ac_optarg ;;
+
+  -cache-file | --cache-file | --cache-fil | --cache-fi \
+  | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
+    ac_prev=cache_file ;;
+  -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
+  | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
+    cache_file=$ac_optarg ;;
+
+  --config-cache | -C)
+    cache_file=config.cache ;;
+
+  -datadir | --datadir | --datadi | --datad)
+    ac_prev=datadir ;;
+  -datadir=* | --datadir=* | --datadi=* | --datad=*)
+    datadir=$ac_optarg ;;
+
+  -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
+  | --dataroo | --dataro | --datar)
+    ac_prev=datarootdir ;;
+  -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
+  | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
+    datarootdir=$ac_optarg ;;
+
+  -disable-* | --disable-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error "invalid feature name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"enable_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval enable_$ac_useropt=no ;;
+
+  -docdir | --docdir | --docdi | --doc | --do)
+    ac_prev=docdir ;;
+  -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
+    docdir=$ac_optarg ;;
+
+  -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
+    ac_prev=dvidir ;;
+  -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
+    dvidir=$ac_optarg ;;
+
+  -enable-* | --enable-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error "invalid feature name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"enable_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval enable_$ac_useropt=\$ac_optarg ;;
+
+  -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
+  | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
+  | --exec | --exe | --ex)
+    ac_prev=exec_prefix ;;
+  -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
+  | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
+  | --exec=* | --exe=* | --ex=*)
+    exec_prefix=$ac_optarg ;;
+
+  -gas | --gas | --ga | --g)
+    # Obsolete; use --with-gas.
+    with_gas=yes ;;
+
+  -help | --help | --hel | --he | -h)
+    ac_init_help=long ;;
+  -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
+    ac_init_help=recursive ;;
+  -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
+    ac_init_help=short ;;
+
+  -host | --host | --hos | --ho)
+    ac_prev=host_alias ;;
+  -host=* | --host=* | --hos=* | --ho=*)
+    host_alias=$ac_optarg ;;
+
+  -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
+    ac_prev=htmldir ;;
+  -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
+  | --ht=*)
+    htmldir=$ac_optarg ;;
+
+  -includedir | --includedir | --includedi | --included | --include \
+  | --includ | --inclu | --incl | --inc)
+    ac_prev=includedir ;;
+  -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
+  | --includ=* | --inclu=* | --incl=* | --inc=*)
+    includedir=$ac_optarg ;;
+
+  -infodir | --infodir | --infodi | --infod | --info | --inf)
+    ac_prev=infodir ;;
+  -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
+    infodir=$ac_optarg ;;
+
+  -libdir | --libdir | --libdi | --libd)
+    ac_prev=libdir ;;
+  -libdir=* | --libdir=* | --libdi=* | --libd=*)
+    libdir=$ac_optarg ;;
+
+  -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
+  | --libexe | --libex | --libe)
+    ac_prev=libexecdir ;;
+  -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
+  | --libexe=* | --libex=* | --libe=*)
+    libexecdir=$ac_optarg ;;
+
+  -localedir | --localedir | --localedi | --localed | --locale)
+    ac_prev=localedir ;;
+  -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
+    localedir=$ac_optarg ;;
+
+  -localstatedir | --localstatedir | --localstatedi | --localstated \
+  | --localstate | --localstat | --localsta | --localst | --locals)
+    ac_prev=localstatedir ;;
+  -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
+  | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
+    localstatedir=$ac_optarg ;;
+
+  -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
+    ac_prev=mandir ;;
+  -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
+    mandir=$ac_optarg ;;
+
+  -nfp | --nfp | --nf)
+    # Obsolete; use --without-fp.
+    with_fp=no ;;
+
+  -no-create | --no-create | --no-creat | --no-crea | --no-cre \
+  | --no-cr | --no-c | -n)
+    no_create=yes ;;
+
+  -no-recursion | --no-recursion | --no-recursio | --no-recursi \
+  | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
+    no_recursion=yes ;;
+
+  -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
+  | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
+  | --oldin | --oldi | --old | --ol | --o)
+    ac_prev=oldincludedir ;;
+  -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
+  | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
+  | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
+    oldincludedir=$ac_optarg ;;
+
+  -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
+    ac_prev=prefix ;;
+  -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
+    prefix=$ac_optarg ;;
+
+  -program-prefix | --program-prefix | --program-prefi | --program-pref \
+  | --program-pre | --program-pr | --program-p)
+    ac_prev=program_prefix ;;
+  -program-prefix=* | --program-prefix=* | --program-prefi=* \
+  | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
+    program_prefix=$ac_optarg ;;
+
+  -program-suffix | --program-suffix | --program-suffi | --program-suff \
+  | --program-suf | --program-su | --program-s)
+    ac_prev=program_suffix ;;
+  -program-suffix=* | --program-suffix=* | --program-suffi=* \
+  | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
+    program_suffix=$ac_optarg ;;
+
+  -program-transform-name | --program-transform-name \
+  | --program-transform-nam | --program-transform-na \
+  | --program-transform-n | --program-transform- \
+  | --program-transform | --program-transfor \
+  | --program-transfo | --program-transf \
+  | --program-trans | --program-tran \
+  | --progr-tra | --program-tr | --program-t)
+    ac_prev=program_transform_name ;;
+  -program-transform-name=* | --program-transform-name=* \
+  | --program-transform-nam=* | --program-transform-na=* \
+  | --program-transform-n=* | --program-transform-=* \
+  | --program-transform=* | --program-transfor=* \
+  | --program-transfo=* | --program-transf=* \
+  | --program-trans=* | --program-tran=* \
+  | --progr-tra=* | --program-tr=* | --program-t=*)
+    program_transform_name=$ac_optarg ;;
+
+  -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
+    ac_prev=pdfdir ;;
+  -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
+    pdfdir=$ac_optarg ;;
+
+  -psdir | --psdir | --psdi | --psd | --ps)
+    ac_prev=psdir ;;
+  -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
+    psdir=$ac_optarg ;;
+
+  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+  | -silent | --silent | --silen | --sile | --sil)
+    silent=yes ;;
+
+  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+    ac_prev=sbindir ;;
+  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+  | --sbi=* | --sb=*)
+    sbindir=$ac_optarg ;;
+
+  -sharedstatedir | --sharedstatedir | --sharedstatedi \
+  | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
+  | --sharedst | --shareds | --shared | --share | --shar \
+  | --sha | --sh)
+    ac_prev=sharedstatedir ;;
+  -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
+  | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
+  | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
+  | --sha=* | --sh=*)
+    sharedstatedir=$ac_optarg ;;
+
+  -site | --site | --sit)
+    ac_prev=site ;;
+  -site=* | --site=* | --sit=*)
+    site=$ac_optarg ;;
+
+  -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
+    ac_prev=srcdir ;;
+  -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
+    srcdir=$ac_optarg ;;
+
+  -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
+  | --syscon | --sysco | --sysc | --sys | --sy)
+    ac_prev=sysconfdir ;;
+  -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
+  | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
+    sysconfdir=$ac_optarg ;;
+
+  -target | --target | --targe | --targ | --tar | --ta | --t)
+    ac_prev=target_alias ;;
+  -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
+    target_alias=$ac_optarg ;;
+
+  -v | -verbose | --verbose | --verbos | --verbo | --verb)
+    verbose=yes ;;
+
+  -version | --version | --versio | --versi | --vers | -V)
+    ac_init_version=: ;;
+
+  -with-* | --with-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error "invalid package name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"with_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval with_$ac_useropt=\$ac_optarg ;;
+
+  -without-* | --without-*)
+    ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+    # Reject names that are not valid shell variable names.
+    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+      as_fn_error "invalid package name: $ac_useropt"
+    ac_useropt_orig=$ac_useropt
+    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+    case $ac_user_opts in
+      *"
+"with_$ac_useropt"
+"*) ;;
+      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
+	 ac_unrecognized_sep=', ';;
+    esac
+    eval with_$ac_useropt=no ;;
+
+  --x)
+    # Obsolete; use --with-x.
+    with_x=yes ;;
+
+  -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
+  | --x-incl | --x-inc | --x-in | --x-i)
+    ac_prev=x_includes ;;
+  -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
+  | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
+    x_includes=$ac_optarg ;;
+
+  -x-libraries | --x-libraries | --x-librarie | --x-librari \
+  | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
+    ac_prev=x_libraries ;;
+  -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
+  | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
+    x_libraries=$ac_optarg ;;
+
+  -*) as_fn_error "unrecognized option: \`$ac_option'
+Try \`$0 --help' for more information."
+    ;;
+
+  *=*)
+    ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
+    # Reject names that are not valid shell variable names.
+    case $ac_envvar in #(
+      '' | [0-9]* | *[!_$as_cr_alnum]* )
+      as_fn_error "invalid variable name: \`$ac_envvar'" ;;
+    esac
+    eval $ac_envvar=\$ac_optarg
+    export $ac_envvar ;;
+
+  *)
+    # FIXME: should be removed in autoconf 3.0.
+    $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+    expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+      $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+    : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
+    ;;
+
+  esac
+done
+
+if test -n "$ac_prev"; then
+  ac_option=--`echo $ac_prev | sed 's/_/-/g'`
+  as_fn_error "missing argument to $ac_option"
+fi
+
+if test -n "$ac_unrecognized_opts"; then
+  case $enable_option_checking in
+    no) ;;
+    fatal) as_fn_error "unrecognized options: $ac_unrecognized_opts" ;;
+    *)     $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+  esac
+fi
+
+# Check all directory arguments for consistency.
+for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
+		datadir sysconfdir sharedstatedir localstatedir includedir \
+		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+		libdir localedir mandir
+do
+  eval ac_val=\$$ac_var
+  # Remove trailing slashes.
+  case $ac_val in
+    */ )
+      ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
+      eval $ac_var=\$ac_val;;
+  esac
+  # Be sure to have absolute directory names.
+  case $ac_val in
+    [\\/$]* | ?:[\\/]* )  continue;;
+    NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
+  esac
+  as_fn_error "expected an absolute directory name for --$ac_var: $ac_val"
+done
+
+# There might be people who depend on the old broken behavior: `$host'
+# used to hold the argument of --host etc.
+# FIXME: To remove some day.
+build=$build_alias
+host=$host_alias
+target=$target_alias
+
+# FIXME: To remove some day.
+if test "x$host_alias" != x; then
+  if test "x$build_alias" = x; then
+    cross_compiling=maybe
+    $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
+    If a cross compiler is detected then cross compile mode will be used." >&2
+  elif test "x$build_alias" != "x$host_alias"; then
+    cross_compiling=yes
+  fi
+fi
+
+ac_tool_prefix=
+test -n "$host_alias" && ac_tool_prefix=$host_alias-
+
+test "$silent" = yes && exec 6>/dev/null
+
+
+ac_pwd=`pwd` && test -n "$ac_pwd" &&
+ac_ls_di=`ls -di .` &&
+ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
+  as_fn_error "working directory cannot be determined"
+test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
+  as_fn_error "pwd does not report name of working directory"
+
+
+# Find the source files, if location was not specified.
+if test -z "$srcdir"; then
+  ac_srcdir_defaulted=yes
+  # Try the directory containing this script, then the parent directory.
+  ac_confdir=`$as_dirname -- "$as_myself" ||
+$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_myself" : 'X\(//\)[^/]' \| \
+	 X"$as_myself" : 'X\(//\)$' \| \
+	 X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_myself" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+  srcdir=$ac_confdir
+  if test ! -r "$srcdir/$ac_unique_file"; then
+    srcdir=..
+  fi
+else
+  ac_srcdir_defaulted=no
+fi
+if test ! -r "$srcdir/$ac_unique_file"; then
+  test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
+  as_fn_error "cannot find sources ($ac_unique_file) in $srcdir"
+fi
+ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
+ac_abs_confdir=`(
+	cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error "$ac_msg"
+	pwd)`
+# When building in place, set srcdir=.
+if test "$ac_abs_confdir" = "$ac_pwd"; then
+  srcdir=.
+fi
+# Remove unnecessary trailing slashes from srcdir.
+# Double slashes in file names in object file debugging info
+# mess up M-x gdb in Emacs.
+case $srcdir in
+*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
+esac
+for ac_var in $ac_precious_vars; do
+  eval ac_env_${ac_var}_set=\${${ac_var}+set}
+  eval ac_env_${ac_var}_value=\$${ac_var}
+  eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
+  eval ac_cv_env_${ac_var}_value=\$${ac_var}
+done
+
+#
+# Report the --help message.
+#
+if test "$ac_init_help" = "long"; then
+  # Omit some internal or obsolete options to make the list less imposing.
+  # This message is too long to be a string in the A/UX 3.1 sh.
+  cat <<_ACEOF
+\`configure' configures runAs 0.1 to adapt to many kinds of systems.
+
+Usage: $0 [OPTION]... [VAR=VALUE]...
+
+To assign environment variables (e.g., CC, CFLAGS...), specify them as
+VAR=VALUE.  See below for descriptions of some of the useful variables.
+
+Defaults for the options are specified in brackets.
+
+Configuration:
+  -h, --help              display this help and exit
+      --help=short        display options specific to this package
+      --help=recursive    display the short help of all the included packages
+  -V, --version           display version information and exit
+  -q, --quiet, --silent   do not print \`checking...' messages
+      --cache-file=FILE   cache test results in FILE [disabled]
+  -C, --config-cache      alias for \`--cache-file=config.cache'
+  -n, --no-create         do not create output files
+      --srcdir=DIR        find the sources in DIR [configure dir or \`..']
+
+Installation directories:
+  --prefix=PREFIX         install architecture-independent files in PREFIX
+                          [$ac_default_prefix]
+  --exec-prefix=EPREFIX   install architecture-dependent files in EPREFIX
+                          [PREFIX]
+
+By default, \`make install' will install all the files in
+\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc.  You can specify
+an installation prefix other than \`$ac_default_prefix' using \`--prefix',
+for instance \`--prefix=\$HOME'.
+
+For better control, use the options below.
+
+Fine tuning of the installation directories:
+  --bindir=DIR            user executables [EPREFIX/bin]
+  --sbindir=DIR           system admin executables [EPREFIX/sbin]
+  --libexecdir=DIR        program executables [EPREFIX/libexec]
+  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
+  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
+  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+  --libdir=DIR            object code libraries [EPREFIX/lib]
+  --includedir=DIR        C header files [PREFIX/include]
+  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
+  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
+  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
+  --infodir=DIR           info documentation [DATAROOTDIR/info]
+  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
+  --mandir=DIR            man documentation [DATAROOTDIR/man]
+  --docdir=DIR            documentation root [DATAROOTDIR/doc/runas]
+  --htmldir=DIR           html documentation [DOCDIR]
+  --dvidir=DIR            dvi documentation [DOCDIR]
+  --pdfdir=DIR            pdf documentation [DOCDIR]
+  --psdir=DIR             ps documentation [DOCDIR]
+_ACEOF
+
+  cat <<\_ACEOF
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+  case $ac_init_help in
+     short | recursive ) echo "Configuration of runAs 0.1:";;
+   esac
+  cat <<\_ACEOF
+
+Optional Packages:
+  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
+  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
+  --with-home             path to hadoop home dir
+
+Some influential environment variables:
+  CC          C compiler command
+  CFLAGS      C compiler flags
+  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
+              nonstandard directory <lib dir>
+  LIBS        libraries to pass to the linker, e.g. -l<library>
+  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+              you have headers in a nonstandard directory <include dir>
+  CPP         C preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to the package provider.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+  # If there are subdirs, report their specific --help.
+  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
+    test -d "$ac_dir" ||
+      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
+      continue
+    ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+  # A ".." for each directory in $ac_dir_suffix.
+  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+  case $ac_top_builddir_sub in
+  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+  esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+  .)  # We are building in place.
+    ac_srcdir=.
+    ac_top_srcdir=$ac_top_builddir_sub
+    ac_abs_top_srcdir=$ac_pwd ;;
+  [\\/]* | ?:[\\/]* )  # Absolute name.
+    ac_srcdir=$srcdir$ac_dir_suffix;
+    ac_top_srcdir=$srcdir
+    ac_abs_top_srcdir=$srcdir ;;
+  *) # Relative name.
+    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+    ac_top_srcdir=$ac_top_build_prefix$srcdir
+    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+    cd "$ac_dir" || { ac_status=$?; continue; }
+    # Check for guested configure.
+    if test -f "$ac_srcdir/configure.gnu"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+    elif test -f "$ac_srcdir/configure"; then
+      echo &&
+      $SHELL "$ac_srcdir/configure" --help=recursive
+    else
+      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+    fi || ac_status=$?
+    cd "$ac_pwd" || { ac_status=$?; break; }
+  done
+fi
+
+test -n "$ac_init_help" && exit $ac_status
+if $ac_init_version; then
+  cat <<\_ACEOF
+runAs configure 0.1
+generated by GNU Autoconf 2.65
+
+Copyright (C) 2009 Free Software Foundation, Inc.
+This configure script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it.
+_ACEOF
+  exit
+fi
+
+## ------------------------ ##
+## Autoconf initialization. ##
+## ------------------------ ##
+
+# ac_fn_c_try_compile LINENO
+# --------------------------
+# Try to compile conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext
+  if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest.$ac_objext; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_compile
+
+# ac_fn_c_try_cpp LINENO
+# ----------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_cpp ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } >/dev/null && {
+	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+    ac_retval=1
+fi
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_cpp
+
+# ac_fn_c_try_run LINENO
+# ----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
+# that executables *can* be run.
+ac_fn_c_try_run ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: program exited with status $ac_status" >&5
+       $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+       ac_retval=$ac_status
+fi
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_run
+
+# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists, giving a warning if it cannot be compiled using
+# the include files in INCLUDES and setting the cache variable VAR
+# accordingly.
+ac_fn_c_check_header_mongrel ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+  $as_echo_n "(cached) " >&6
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+else
+  # Is the header compilable?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
+$as_echo_n "checking $2 usability... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_header_compiler=yes
+else
+  ac_header_compiler=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
+$as_echo "$ac_header_compiler" >&6; }
+
+# Is the header present?
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
+$as_echo_n "checking $2 presence... " >&6; }
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <$2>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  ac_header_preproc=yes
+else
+  ac_header_preproc=no
+fi
+rm -f conftest.err conftest.$ac_ext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
+$as_echo "$ac_header_preproc" >&6; }
+
+# So?  What about this header?
+case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #((
+  yes:no: )
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
+$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+    ;;
+  no:yes:* )
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
+$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     check for missing prerequisite headers?" >&5
+$as_echo "$as_me: WARNING: $2:     check for missing prerequisite headers?" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
+$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&5
+$as_echo "$as_me: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&2;}
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
+$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
+    ;;
+esac
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+  $as_echo_n "(cached) " >&6
+else
+  eval "$3=\$ac_header_compiler"
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+fi
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+
+} # ac_fn_c_check_header_mongrel
+
+# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
+# -------------------------------------------------------
+# Tests whether HEADER exists and can be compiled using the include files in
+# INCLUDES, setting the cache variable VAR accordingly.
+ac_fn_c_check_header_compile ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  eval "$3=yes"
+else
+  eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+
+} # ac_fn_c_check_header_compile
+
+# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
+# -------------------------------------------
+# Tests whether TYPE exists after having included INCLUDES, setting cache
+# variable VAR accordingly.
+ac_fn_c_check_type ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+  $as_echo_n "(cached) " >&6
+else
+  eval "$3=no"
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+int
+main ()
+{
+if (sizeof ($2))
+	 return 0;
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$4
+int
+main ()
+{
+if (sizeof (($2)))
+	    return 0;
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+  eval "$3=yes"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+
+} # ac_fn_c_check_type
+
+# ac_fn_c_try_link LINENO
+# -----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+	 test -z "$ac_c_werror_flag" ||
+	 test ! -s conftest.err
+       } && test -s conftest$ac_exeext && {
+	 test "$cross_compiling" = yes ||
+	 $as_test_x conftest$ac_exeext
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+	ac_retval=1
+fi
+  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
+  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
+  # interfere with the next link command; also delete a directory that is
+  # left behind by Apple's compiler.  We do this before executing the actions.
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_link
+
+# ac_fn_c_check_func LINENO FUNC VAR
+# ----------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_c_check_func ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+    which can conflict with char $2 (); below.
+    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+    <limits.h> exists even on freestanding compilers.  */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+    to always fail with ENOSYS.  Some functions are actually named
+    something starting with __ and the normal name is an alias.  */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main ()
+{
+return $2 ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  eval "$3=yes"
+else
+  eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
+
+} # ac_fn_c_check_func
+cat >config.log <<_ACEOF
+This file contains any messages produced by compilers while
+running configure, to aid debugging if configure makes a mistake.
+
+It was created by runAs $as_me 0.1, which was
+generated by GNU Autoconf 2.65.  Invocation command line was
+
+  $ $0 $@
+
+_ACEOF
+exec 5>>config.log
+{
+cat <<_ASUNAME
+## --------- ##
+## Platform. ##
+## --------- ##
+
+hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
+/bin/uname -X     = `(/bin/uname -X) 2>/dev/null     || echo unknown`
+
+/bin/arch              = `(/bin/arch) 2>/dev/null              || echo unknown`
+/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null       || echo unknown`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
+/usr/bin/hostinfo      = `(/usr/bin/hostinfo) 2>/dev/null      || echo unknown`
+/bin/machine           = `(/bin/machine) 2>/dev/null           || echo unknown`
+/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null       || echo unknown`
+/bin/universe          = `(/bin/universe) 2>/dev/null          || echo unknown`
+
+_ASUNAME
+
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    $as_echo "PATH: $as_dir"
+  done
+IFS=$as_save_IFS
+
+} >&5
+
+cat >&5 <<_ACEOF
+
+
+## ----------- ##
+## Core tests. ##
+## ----------- ##
+
+_ACEOF
+
+
+# Keep a trace of the command line.
+# Strip out --no-create and --no-recursion so they do not pile up.
+# Strip out --silent because we don't want to record it for future runs.
+# Also quote any args containing shell meta-characters.
+# Make two passes to allow for proper duplicate-argument suppression.
+ac_configure_args=
+ac_configure_args0=
+ac_configure_args1=
+ac_must_keep_next=false
+for ac_pass in 1 2
+do
+  for ac_arg
+  do
+    case $ac_arg in
+    -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
+    -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+    | -silent | --silent | --silen | --sile | --sil)
+      continue ;;
+    *\'*)
+      ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+    esac
+    case $ac_pass in
+    1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
+    2)
+      as_fn_append ac_configure_args1 " '$ac_arg'"
+      if test $ac_must_keep_next = true; then
+	ac_must_keep_next=false # Got value, back to normal.
+      else
+	case $ac_arg in
+	  *=* | --config-cache | -C | -disable-* | --disable-* \
+	  | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
+	  | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
+	  | -with-* | --with-* | -without-* | --without-* | --x)
+	    case "$ac_configure_args0 " in
+	      "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
+	    esac
+	    ;;
+	  -* ) ac_must_keep_next=true ;;
+	esac
+      fi
+      as_fn_append ac_configure_args " '$ac_arg'"
+      ;;
+    esac
+  done
+done
+{ ac_configure_args0=; unset ac_configure_args0;}
+{ ac_configure_args1=; unset ac_configure_args1;}
+
+# When interrupted or exit'd, cleanup temporary files, and complete
+# config.log.  We remove comments because anyway the quotes in there
+# would cause problems or look ugly.
+# WARNING: Use '\'' to represent an apostrophe within the trap.
+# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+trap 'exit_status=$?
+  # Save into config.log some information that might help in debugging.
+  {
+    echo
+
+    cat <<\_ASBOX
+## ---------------- ##
+## Cache variables. ##
+## ---------------- ##
+_ASBOX
+    echo
+    # The following way of writing the cache mishandles newlines in values,
+(
+  for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
+    eval ac_val=\$$ac_var
+    case $ac_val in #(
+    *${as_nl}*)
+      case $ac_var in #(
+      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+      esac
+      case $ac_var in #(
+      _ | IFS | as_nl) ;; #(
+      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+      *) { eval $ac_var=; unset $ac_var;} ;;
+      esac ;;
+    esac
+  done
+  (set) 2>&1 |
+    case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
+    *${as_nl}ac_space=\ *)
+      sed -n \
+	"s/'\''/'\''\\\\'\'''\''/g;
+	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
+      ;; #(
+    *)
+      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+      ;;
+    esac |
+    sort
+)
+    echo
+
+    cat <<\_ASBOX
+## ----------------- ##
+## Output variables. ##
+## ----------------- ##
+_ASBOX
+    echo
+    for ac_var in $ac_subst_vars
+    do
+      eval ac_val=\$$ac_var
+      case $ac_val in
+      *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+      esac
+      $as_echo "$ac_var='\''$ac_val'\''"
+    done | sort
+    echo
+
+    if test -n "$ac_subst_files"; then
+      cat <<\_ASBOX
+## ------------------- ##
+## File substitutions. ##
+## ------------------- ##
+_ASBOX
+      echo
+      for ac_var in $ac_subst_files
+      do
+	eval ac_val=\$$ac_var
+	case $ac_val in
+	*\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+	esac
+	$as_echo "$ac_var='\''$ac_val'\''"
+      done | sort
+      echo
+    fi
+
+    if test -s confdefs.h; then
+      cat <<\_ASBOX
+## ----------- ##
+## confdefs.h. ##
+## ----------- ##
+_ASBOX
+      echo
+      cat confdefs.h
+      echo
+    fi
+    test "$ac_signal" != 0 &&
+      $as_echo "$as_me: caught signal $ac_signal"
+    $as_echo "$as_me: exit $exit_status"
+  } >&5
+  rm -f core *.core core.conftest.* &&
+    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+    exit $exit_status
+' 0
+for ac_signal in 1 2 13 15; do
+  trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
+done
+ac_signal=0
+
+# confdefs.h avoids OS command line length limits that DEFS can exceed.
+rm -f -r conftest* confdefs.h
+
+$as_echo "/* confdefs.h */" > confdefs.h
+
+# Predefined preprocessor variables.
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_NAME "$PACKAGE_NAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_VERSION "$PACKAGE_VERSION"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_STRING "$PACKAGE_STRING"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
+_ACEOF
+
+cat >>confdefs.h <<_ACEOF
+#define PACKAGE_URL "$PACKAGE_URL"
+_ACEOF
+
+
+# Let the site file select an alternate cache file if it wants to.
+# Prefer an explicitly selected file to automatically selected ones.
+ac_site_file1=NONE
+ac_site_file2=NONE
+if test -n "$CONFIG_SITE"; then
+  ac_site_file1=$CONFIG_SITE
+elif test "x$prefix" != xNONE; then
+  ac_site_file1=$prefix/share/config.site
+  ac_site_file2=$prefix/etc/config.site
+else
+  ac_site_file1=$ac_default_prefix/share/config.site
+  ac_site_file2=$ac_default_prefix/etc/config.site
+fi
+for ac_site_file in "$ac_site_file1" "$ac_site_file2"
+do
+  test "x$ac_site_file" = xNONE && continue
+  if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+$as_echo "$as_me: loading site script $ac_site_file" >&6;}
+    sed 's/^/| /' "$ac_site_file" >&5
+    . "$ac_site_file"
+  fi
+done
+
+if test -r "$cache_file"; then
+  # Some versions of bash will fail to source /dev/null (special files
+  # actually), so we avoid doing that.  DJGPP emulates it as a regular file.
+  if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+$as_echo "$as_me: loading cache $cache_file" >&6;}
+    case $cache_file in
+      [\\/]* | ?:[\\/]* ) . "$cache_file";;
+      *)                      . "./$cache_file";;
+    esac
+  fi
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+$as_echo "$as_me: creating cache $cache_file" >&6;}
+  >$cache_file
+fi
+
+# Check that the precious variables saved in the cache have kept the same
+# value.
+ac_cache_corrupted=false
+for ac_var in $ac_precious_vars; do
+  eval ac_old_set=\$ac_cv_env_${ac_var}_set
+  eval ac_new_set=\$ac_env_${ac_var}_set
+  eval ac_old_val=\$ac_cv_env_${ac_var}_value
+  eval ac_new_val=\$ac_env_${ac_var}_value
+  case $ac_old_set,$ac_new_set in
+    set,)
+      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+      ac_cache_corrupted=: ;;
+    ,set)
+      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+      ac_cache_corrupted=: ;;
+    ,);;
+    *)
+      if test "x$ac_old_val" != "x$ac_new_val"; then
+	# differences in whitespace do not lead to failure.
+	ac_old_val_w=`echo x $ac_old_val`
+	ac_new_val_w=`echo x $ac_new_val`
+	if test "$ac_old_val_w" != "$ac_new_val_w"; then
+	  { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+	  ac_cache_corrupted=:
+	else
+	  { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+	  eval $ac_var=\$ac_old_val
+	fi
+	{ $as_echo "$as_me:${as_lineno-$LINENO}:   former value:  \`$ac_old_val'" >&5
+$as_echo "$as_me:   former value:  \`$ac_old_val'" >&2;}
+	{ $as_echo "$as_me:${as_lineno-$LINENO}:   current value: \`$ac_new_val'" >&5
+$as_echo "$as_me:   current value: \`$ac_new_val'" >&2;}
+      fi;;
+  esac
+  # Pass precious variables to config.status.
+  if test "$ac_new_set" = set; then
+    case $ac_new_val in
+    *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+    *) ac_arg=$ac_var=$ac_new_val ;;
+    esac
+    case " $ac_configure_args " in
+      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
+      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
+    esac
+  fi
+done
+if $ac_cache_corrupted; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+  { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
+  as_fn_error "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
+fi
+## -------------------- ##
+## Main body of script. ##
+## -------------------- ##
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+
+#changing the default prefix value to an empty string, so that the binary
+#does not get installed within the system
+
+
+#add new argument --with-home
+
+# Check whether --with-home was given.
+if test "${with_home+set}" = set; then :
+  withval=$with_home;
+fi
+
+
+ac_config_headers="$ac_config_headers runAs.h"
+
+
+# Checks for programs.
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+if test -n "$ac_tool_prefix"; then
+  # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_CC="${ac_tool_prefix}gcc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$ac_cv_prog_CC"; then
+  ac_ct_CC=$CC
+  # Extract the first word of "gcc", so it can be a program name with args.
+set dummy gcc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_CC"; then
+  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_ac_ct_CC="gcc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
+else
+  CC="$ac_cv_prog_CC"
+fi
+
+if test -z "$CC"; then
+          if test -n "$ac_tool_prefix"; then
+    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+set dummy ${ac_tool_prefix}cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_CC="${ac_tool_prefix}cc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  fi
+fi
+if test -z "$CC"; then
+  # Extract the first word of "cc", so it can be a program name with args.
+set dummy cc; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+  ac_prog_rejected=no
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+       ac_prog_rejected=yes
+       continue
+     fi
+    ac_cv_prog_CC="cc"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+if test $ac_prog_rejected = yes; then
+  # We found a bogon in the path, so make sure we never use it.
+  set dummy $ac_cv_prog_CC
+  shift
+  if test $# != 0; then
+    # We chose a different compiler from the bogus one.
+    # However, it has the same basename, so the bogon will be chosen
+    # first if we set CC to just the basename; use the full file name.
+    shift
+    ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+  fi
+fi
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+fi
+if test -z "$CC"; then
+  if test -n "$ac_tool_prefix"; then
+  for ac_prog in cl.exe
+  do
+    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$CC"; then
+  ac_cv_prog_CC="$CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+CC=$ac_cv_prog_CC
+if test -n "$CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+$as_echo "$CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+    test -n "$CC" && break
+  done
+fi
+if test -z "$CC"; then
+  ac_ct_CC=$CC
+  for ac_prog in cl.exe
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$ac_ct_CC"; then
+  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
+    ac_cv_prog_ac_ct_CC="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+ac_ct_CC=$ac_cv_prog_ac_ct_CC
+if test -n "$ac_ct_CC"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+$as_echo "$ac_ct_CC" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$ac_ct_CC" && break
+done
+
+  if test "x$ac_ct_CC" = x; then
+    CC=""
+  else
+    case $cross_compiling:$ac_tool_warned in
+yes:)
+{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ac_tool_warned=yes ;;
+esac
+    CC=$ac_ct_CC
+  fi
+fi
+
+fi
+
+
+test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error "no acceptable C compiler found in \$PATH
+See \`config.log' for more details." "$LINENO" 5; }
+
+# Provide some information about the compiler.
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+set X $ac_compile
+ac_compiler=$2
+for ac_option in --version -v -V -qversion; do
+  { { ac_try="$ac_compiler $ac_option >&5"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    sed '10a\
+... rest of stderr output deleted ...
+         10q' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+  fi
+  rm -f conftest.er1 conftest.err
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+done
+
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+# Try to create an executable without -o first, disregard a.out.
+# It will help us diagnose broken compilers, and finding out an intuition
+# of exeext.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
+$as_echo_n "checking whether the C compiler works... " >&6; }
+ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+
+# The possible output files:
+ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+
+ac_rmfiles=
+for ac_file in $ac_files
+do
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+    * ) ac_rmfiles="$ac_rmfiles $ac_file";;
+  esac
+done
+rm -f $ac_rmfiles
+
+if { { ac_try="$ac_link_default"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link_default") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
+  # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+# in a Makefile.  We should not override ac_cv_exeext if it was cached,
+# so that the user can short-circuit this test for compilers unknown to
+# Autoconf.
+for ac_file in $ac_files ''
+do
+  test -f "$ac_file" || continue
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
+	;;
+    [ab].out )
+	# We found the default executable, but exeext='' is most
+	# certainly right.
+	break;;
+    *.* )
+	if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+	then :; else
+	   ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+	fi
+	# We set ac_cv_exeext here because the later test for it is not
+	# safe: cross compilers may not add the suffix if given an `-o'
+	# argument, so we may need to know it at that point already.
+	# Even if this section looks crufty: it has the advantage of
+	# actually working.
+	break;;
+    * )
+	break;;
+  esac
+done
+test "$ac_cv_exeext" = no && ac_cv_exeext=
+
+else
+  ac_file=''
+fi
+if test -z "$ac_file"; then :
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+$as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+{ as_fn_set_status 77
+as_fn_error "C compiler cannot create executables
+See \`config.log' for more details." "$LINENO" 5; }; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
+$as_echo_n "checking for C compiler default output file name... " >&6; }
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+$as_echo "$ac_file" >&6; }
+ac_exeext=$ac_cv_exeext
+
+rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+$as_echo_n "checking for suffix of executables... " >&6; }
+if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
+  # If both `conftest.exe' and `conftest' are `present' (well, observable)
+# catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
+# work properly (i.e., refer to `conftest.exe'), while it won't with
+# `rm'.
+for ac_file in conftest.exe conftest conftest.*; do
+  test -f "$ac_file" || continue
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
+    *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+	  break;;
+    * ) break;;
+  esac
+done
+else
+  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error "cannot compute suffix of executables: cannot compile and link
+See \`config.log' for more details." "$LINENO" 5; }
+fi
+rm -f conftest conftest$ac_cv_exeext
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+$as_echo "$ac_cv_exeext" >&6; }
+
+rm -f conftest.$ac_ext
+EXEEXT=$ac_cv_exeext
+ac_exeext=$EXEEXT
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdio.h>
+int
+main ()
+{
+FILE *f = fopen ("conftest.out", "w");
+ return ferror (f) || fclose (f) != 0;
+
+  ;
+  return 0;
+}
+_ACEOF
+ac_clean_files="$ac_clean_files conftest.out"
+# Check that the compiler produces executables we can run.  If not, either
+# the compiler is broken, or we cross compile.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+$as_echo_n "checking whether we are cross compiling... " >&6; }
+if test "$cross_compiling" != yes; then
+  { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }
+  if { ac_try='./conftest$ac_cv_exeext'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then
+    cross_compiling=no
+  else
+    if test "$cross_compiling" = maybe; then
+	cross_compiling=yes
+    else
+	{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error "cannot run C compiled programs.
+If you meant to cross compile, use \`--host'.
+See \`config.log' for more details." "$LINENO" 5; }
+    fi
+  fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+$as_echo "$cross_compiling" >&6; }
+
+rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ac_clean_files=$ac_clean_files_save
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+$as_echo_n "checking for suffix of object files... " >&6; }
+if test "${ac_cv_objext+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+rm -f conftest.o conftest.obj
+if { { ac_try="$ac_compile"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_compile") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; then :
+  for ac_file in conftest.o conftest.obj conftest.*; do
+  test -f "$ac_file" || continue;
+  case $ac_file in
+    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
+    *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
+       break;;
+  esac
+done
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error "cannot compute suffix of object files: cannot compile
+See \`config.log' for more details." "$LINENO" 5; }
+fi
+rm -f conftest.$ac_cv_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+$as_echo "$ac_cv_objext" >&6; }
+OBJEXT=$ac_cv_objext
+ac_objext=$OBJEXT
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
+$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
+if test "${ac_cv_c_compiler_gnu+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+#ifndef __GNUC__
+       choke me
+#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_compiler_gnu=yes
+else
+  ac_compiler_gnu=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ac_cv_c_compiler_gnu=$ac_compiler_gnu
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+if test $ac_compiler_gnu = yes; then
+  GCC=yes
+else
+  GCC=
+fi
+ac_test_CFLAGS=${CFLAGS+set}
+ac_save_CFLAGS=$CFLAGS
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+$as_echo_n "checking whether $CC accepts -g... " >&6; }
+if test "${ac_cv_prog_cc_g+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_save_c_werror_flag=$ac_c_werror_flag
+   ac_c_werror_flag=yes
+   ac_cv_prog_cc_g=no
+   CFLAGS="-g"
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_g=yes
+else
+  CFLAGS=""
+      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+
+else
+  ac_c_werror_flag=$ac_save_c_werror_flag
+	 CFLAGS="-g"
+	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_g=yes
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   ac_c_werror_flag=$ac_save_c_werror_flag
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+$as_echo "$ac_cv_prog_cc_g" >&6; }
+if test "$ac_test_CFLAGS" = set; then
+  CFLAGS=$ac_save_CFLAGS
+elif test $ac_cv_prog_cc_g = yes; then
+  if test "$GCC" = yes; then
+    CFLAGS="-g -O2"
+  else
+    CFLAGS="-g"
+  fi
+else
+  if test "$GCC" = yes; then
+    CFLAGS="-O2"
+  else
+    CFLAGS=
+  fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
+$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
+if test "${ac_cv_prog_cc_c89+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  ac_cv_prog_cc_c89=no
+ac_save_CC=$CC
+cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdarg.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+/* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
+struct buf { int x; };
+FILE * (*rcsopen) (struct buf *, struct stat *, int);
+static char *e (p, i)
+     char **p;
+     int i;
+{
+  return p[i];
+}
+static char *f (char * (*g) (char **, int), char **p, ...)
+{
+  char *s;
+  va_list v;
+  va_start (v,p);
+  s = g (p, va_arg (v,int));
+  va_end (v);
+  return s;
+}
+
+/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
+   function prototypes and stuff, but not '\xHH' hex character constants.
+   These don't provoke an error unfortunately, instead are silently treated
+   as 'x'.  The following induces an error, until -std is added to get
+   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
+   array size at least.  It's necessary to write '\x00'==0 to get something
+   that's true only with -std.  */
+int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+
+/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+   inside strings and character constants.  */
+#define FOO(x) 'x'
+int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
+
+int test (int i, double x);
+struct s1 {int (*f) (int a);};
+struct s2 {int (*f) (double a);};
+int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
+int argc;
+char **argv;
+int
+main ()
+{
+return f (e, argv, 0) != argv[0]  ||  f (e, argv, 1) != argv[1];
+  ;
+  return 0;
+}
+_ACEOF
+for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
+	-Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+do
+  CC="$ac_save_CC $ac_arg"
+  if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_prog_cc_c89=$ac_arg
+fi
+rm -f core conftest.err conftest.$ac_objext
+  test "x$ac_cv_prog_cc_c89" != "xno" && break
+done
+rm -f conftest.$ac_ext
+CC=$ac_save_CC
+
+fi
+# AC_CACHE_VAL
+case "x$ac_cv_prog_cc_c89" in
+  x)
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+$as_echo "none needed" >&6; } ;;
+  xno)
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+$as_echo "unsupported" >&6; } ;;
+  *)
+    CC="$CC $ac_cv_prog_cc_c89"
+    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
+esac
+if test "x$ac_cv_prog_cc_c89" != xno; then :
+
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+# Checks for libraries.
+
+# Checks for header files.
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
+$as_echo_n "checking how to run the C preprocessor... " >&6; }
+# On Suns, sometimes $CPP names a directory.
+if test -n "$CPP" && test -d "$CPP"; then
+  CPP=
+fi
+if test -z "$CPP"; then
+  if test "${ac_cv_prog_CPP+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+      # Double quotes because CPP needs to be expanded
+    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
+    do
+      ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+		     Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.$ac_ext
+
+  # OK, works on sane cases.  Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  # Broken: success on invalid input.
+continue
+else
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+  break
+fi
+
+    done
+    ac_cv_prog_CPP=$CPP
+
+fi
+  CPP=$ac_cv_prog_CPP
+else
+  ac_cv_prog_CPP=$CPP
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
+$as_echo "$CPP" >&6; }
+ac_preproc_ok=false
+for ac_c_preproc_warn_flag in '' yes
+do
+  # Use a header file that comes with gcc, so configuring glibc
+  # with a fresh cross-compiler works.
+  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+  # <limits.h> exists even on freestanding compilers.
+  # On the NeXT, cc -E runs the code through the compiler's parser,
+  # not just through cpp. "Syntax error" is here to catch this case.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+		     Syntax error
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+
+else
+  # Broken: fails on valid input.
+continue
+fi
+rm -f conftest.err conftest.$ac_ext
+
+  # OK, works on sane cases.  Now check whether nonexistent headers
+  # can be detected and how.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ac_nonexistent.h>
+_ACEOF
+if ac_fn_c_try_cpp "$LINENO"; then :
+  # Broken: success on invalid input.
+continue
+else
+  # Passes both tests.
+ac_preproc_ok=:
+break
+fi
+rm -f conftest.err conftest.$ac_ext
+
+done
+# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+rm -f conftest.err conftest.$ac_ext
+if $ac_preproc_ok; then :
+
+else
+  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+as_fn_error "C preprocessor \"$CPP\" fails sanity check
+See \`config.log' for more details." "$LINENO" 5; }
+fi
+
+ac_ext=c
+ac_cpp='$CPP $CPPFLAGS'
+ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ac_compiler_gnu=$ac_cv_c_compiler_gnu
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
+if test "${ac_cv_path_GREP+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$GREP"; then
+  ac_path_GREP_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_prog in grep ggrep; do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
+      { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
+# Check for GNU ac_path_GREP and select it if it is found.
+  # Check for GNU $ac_path_GREP
+case `"$ac_path_GREP" --version 2>&1` in
+*GNU*)
+  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+*)
+  ac_count=0
+  $as_echo_n 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    $as_echo 'GREP' >> "conftest.nl"
+    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_GREP_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_GREP="$ac_path_GREP"
+      ac_path_GREP_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_GREP_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_GREP"; then
+    as_fn_error "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+  fi
+else
+  ac_cv_path_GREP=$GREP
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
+$as_echo "$ac_cv_path_GREP" >&6; }
+ GREP="$ac_cv_path_GREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
+$as_echo_n "checking for egrep... " >&6; }
+if test "${ac_cv_path_EGREP+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+   then ac_cv_path_EGREP="$GREP -E"
+   else
+     if test -z "$EGREP"; then
+  ac_path_EGREP_found=false
+  # Loop through the user's path and test for each of PROGNAME-LIST
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_prog in egrep; do
+    for ac_exec_ext in '' $ac_executable_extensions; do
+      ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
+      { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
+# Check for GNU ac_path_EGREP and select it if it is found.
+  # Check for GNU $ac_path_EGREP
+case `"$ac_path_EGREP" --version 2>&1` in
+*GNU*)
+  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+*)
+  ac_count=0
+  $as_echo_n 0123456789 >"conftest.in"
+  while :
+  do
+    cat "conftest.in" "conftest.in" >"conftest.tmp"
+    mv "conftest.tmp" "conftest.in"
+    cp "conftest.in" "conftest.nl"
+    $as_echo 'EGREP' >> "conftest.nl"
+    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+    as_fn_arith $ac_count + 1 && ac_count=$as_val
+    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+      # Best one so far, save it but keep looking for a better one
+      ac_cv_path_EGREP="$ac_path_EGREP"
+      ac_path_EGREP_max=$ac_count
+    fi
+    # 10*(2^10) chars as input seems more than enough
+    test $ac_count -gt 10 && break
+  done
+  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+esac
+
+      $ac_path_EGREP_found && break 3
+    done
+  done
+  done
+IFS=$as_save_IFS
+  if test -z "$ac_cv_path_EGREP"; then
+    as_fn_error "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+  fi
+else
+  ac_cv_path_EGREP=$EGREP
+fi
+
+   fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
+$as_echo "$ac_cv_path_EGREP" >&6; }
+ EGREP="$ac_cv_path_EGREP"
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
+$as_echo_n "checking for ANSI C header files... " >&6; }
+if test "${ac_cv_header_stdc+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <float.h>
+
+int
+main ()
+{
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_header_stdc=yes
+else
+  ac_cv_header_stdc=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+
+if test $ac_cv_header_stdc = yes; then
+  # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <string.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "memchr" >/dev/null 2>&1; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+  # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <stdlib.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "free" >/dev/null 2>&1; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f conftest*
+
+fi
+
+if test $ac_cv_header_stdc = yes; then
+  # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
+  if test "$cross_compiling" = yes; then :
+  :
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <ctype.h>
+#include <stdlib.h>
+#if ((' ' & 0x0FF) == 0x020)
+# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
+# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
+#else
+# define ISLOWER(c) \
+		   (('a' <= (c) && (c) <= 'i') \
+		     || ('j' <= (c) && (c) <= 'r') \
+		     || ('s' <= (c) && (c) <= 'z'))
+# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
+#endif
+
+#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+int
+main ()
+{
+  int i;
+  for (i = 0; i < 256; i++)
+    if (XOR (islower (i), ISLOWER (i))
+	|| toupper (i) != TOUPPER (i))
+      return 2;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+
+else
+  ac_cv_header_stdc=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+  conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
+$as_echo "$ac_cv_header_stdc" >&6; }
+if test $ac_cv_header_stdc = yes; then
+
+$as_echo "#define STDC_HEADERS 1" >>confdefs.h
+
+fi
+
+# On IRIX 5.3, sys/types and inttypes.h are conflicting.
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
+		  inttypes.h stdint.h unistd.h
+do :
+  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
+"
+eval as_val=\$$as_ac_Header
+   if test "x$as_val" = x""yes; then :
+  cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+for ac_header in stdlib.h string.h unistd.h fcntl.h
+do :
+  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
+ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
+eval as_val=\$$as_ac_Header
+   if test "x$as_val" = x""yes; then :
+  cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+
+done
+
+
+#check for HADOOP_HOME
+if test "$with_home" != ""
+then
+cat >>confdefs.h <<_ACEOF
+#define HADOOP_HOME "$with_home"
+_ACEOF
+
+fi
+
+# Checks for typedefs, structures, and compiler characteristics.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5
+$as_echo_n "checking for an ANSI C-conforming const... " >&6; }
+if test "${ac_cv_c_const+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+int
+main ()
+{
+/* FIXME: Include the comments suggested by Paul. */
+#ifndef __cplusplus
+  /* Ultrix mips cc rejects this.  */
+  typedef int charset[2];
+  const charset cs;
+  /* SunOS 4.1.1 cc rejects this.  */
+  char const *const *pcpcc;
+  char **ppc;
+  /* NEC SVR4.0.2 mips cc rejects this.  */
+  struct point {int x, y;};
+  static struct point const zero = {0,0};
+  /* AIX XL C 1.02.0.0 rejects this.
+     It does not let you subtract one const X* pointer from another in
+     an arm of an if-expression whose if-part is not a constant
+     expression */
+  const char *g = "string";
+  pcpcc = &g + (g ? g-g : 0);
+  /* HPUX 7.0 cc rejects these. */
+  ++pcpcc;
+  ppc = (char**) pcpcc;
+  pcpcc = (char const *const *) ppc;
+  { /* SCO 3.2v4 cc rejects this.  */
+    char *t;
+    char const *s = 0 ? (char *) 0 : (char const *) 0;
+
+    *t++ = 0;
+    if (s) return 0;
+  }
+  { /* Someone thinks the Sun supposedly-ANSI compiler will reject this.  */
+    int x[] = {25, 17};
+    const int *foo = &x[0];
+    ++foo;
+  }
+  { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */
+    typedef const int *iptr;
+    iptr p = 0;
+    ++p;
+  }
+  { /* AIX XL C 1.02.0.0 rejects this saying
+       "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
+    struct s { int j; const int *ap[3]; };
+    struct s *b; b->j = 5;
+  }
+  { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
+    const int foo = 10;
+    if (!foo) return 0;
+  }
+  return !cs[0] && !zero.x;
+#endif
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_c_const=yes
+else
+  ac_cv_c_const=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5
+$as_echo "$ac_cv_c_const" >&6; }
+if test $ac_cv_c_const = no; then
+
+$as_echo "#define const /**/" >>confdefs.h
+
+fi
+
+ac_fn_c_check_type "$LINENO" "pid_t" "ac_cv_type_pid_t" "$ac_includes_default"
+if test "x$ac_cv_type_pid_t" = x""yes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define pid_t int
+_ACEOF
+
+fi
+
+ac_fn_c_check_type "$LINENO" "mode_t" "ac_cv_type_mode_t" "$ac_includes_default"
+if test "x$ac_cv_type_mode_t" = x""yes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define mode_t int
+_ACEOF
+
+fi
+
+ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default"
+if test "x$ac_cv_type_size_t" = x""yes; then :
+
+else
+
+cat >>confdefs.h <<_ACEOF
+#define size_t unsigned int
+_ACEOF
+
+fi
+
+
+# Checks for library functions.
+for ac_header in stdlib.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default"
+if test "x$ac_cv_header_stdlib_h" = x""yes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_STDLIB_H 1
+_ACEOF
+
+fi
+
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible malloc" >&5
+$as_echo_n "checking for GNU libc compatible malloc... " >&6; }
+if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test "$cross_compiling" = yes; then :
+  ac_cv_func_malloc_0_nonnull=no
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#if defined STDC_HEADERS || defined HAVE_STDLIB_H
+# include <stdlib.h>
+#else
+char *malloc ();
+#endif
+
+int
+main ()
+{
+return ! malloc (0);
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+  ac_cv_func_malloc_0_nonnull=yes
+else
+  ac_cv_func_malloc_0_nonnull=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+  conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_malloc_0_nonnull" >&5
+$as_echo "$ac_cv_func_malloc_0_nonnull" >&6; }
+if test $ac_cv_func_malloc_0_nonnull = yes; then :
+
+$as_echo "#define HAVE_MALLOC 1" >>confdefs.h
+
+else
+  $as_echo "#define HAVE_MALLOC 0" >>confdefs.h
+
+   case " $LIBOBJS " in
+  *" malloc.$ac_objext "* ) ;;
+  *) LIBOBJS="$LIBOBJS malloc.$ac_objext"
+ ;;
+esac
+
+
+$as_echo "#define malloc rpl_malloc" >>confdefs.h
+
+fi
+
+
+for ac_header in stdlib.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default"
+if test "x$ac_cv_header_stdlib_h" = x""yes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_STDLIB_H 1
+_ACEOF
+
+fi
+
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible realloc" >&5
+$as_echo_n "checking for GNU libc compatible realloc... " >&6; }
+if test "${ac_cv_func_realloc_0_nonnull+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test "$cross_compiling" = yes; then :
+  ac_cv_func_realloc_0_nonnull=no
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#if defined STDC_HEADERS || defined HAVE_STDLIB_H
+# include <stdlib.h>
+#else
+char *realloc ();
+#endif
+
+int
+main ()
+{
+return ! realloc (0, 0);
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+  ac_cv_func_realloc_0_nonnull=yes
+else
+  ac_cv_func_realloc_0_nonnull=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+  conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_realloc_0_nonnull" >&5
+$as_echo "$ac_cv_func_realloc_0_nonnull" >&6; }
+if test $ac_cv_func_realloc_0_nonnull = yes; then :
+
+$as_echo "#define HAVE_REALLOC 1" >>confdefs.h
+
+else
+  $as_echo "#define HAVE_REALLOC 0" >>confdefs.h
+
+   case " $LIBOBJS " in
+  *" realloc.$ac_objext "* ) ;;
+  *) LIBOBJS="$LIBOBJS realloc.$ac_objext"
+ ;;
+esac
+
+
+$as_echo "#define realloc rpl_realloc" >>confdefs.h
+
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5
+$as_echo_n "checking for uid_t in sys/types.h... " >&6; }
+if test "${ac_cv_type_uid_t+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <sys/types.h>
+
+_ACEOF
+if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
+  $EGREP "uid_t" >/dev/null 2>&1; then :
+  ac_cv_type_uid_t=yes
+else
+  ac_cv_type_uid_t=no
+fi
+rm -f conftest*
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_uid_t" >&5
+$as_echo "$ac_cv_type_uid_t" >&6; }
+if test $ac_cv_type_uid_t = no; then
+
+$as_echo "#define uid_t int" >>confdefs.h
+
+
+$as_echo "#define gid_t int" >>confdefs.h
+
+fi
+
+for ac_header in unistd.h
+do :
+  ac_fn_c_check_header_mongrel "$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default"
+if test "x$ac_cv_header_unistd_h" = x""yes; then :
+  cat >>confdefs.h <<_ACEOF
+#define HAVE_UNISTD_H 1
+_ACEOF
+
+fi
+
+done
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working chown" >&5
+$as_echo_n "checking for working chown... " >&6; }
+if test "${ac_cv_func_chown_works+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test "$cross_compiling" = yes; then :
+  ac_cv_func_chown_works=no
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+$ac_includes_default
+#include <fcntl.h>
+
+int
+main ()
+{
+  char *f = "conftest.chown";
+  struct stat before, after;
+
+  if (creat (f, 0600) < 0)
+    return 1;
+  if (stat (f, &before) < 0)
+    return 1;
+  if (chown (f, (uid_t) -1, (gid_t) -1) == -1)
+    return 1;
+  if (stat (f, &after) < 0)
+    return 1;
+  return ! (before.st_uid == after.st_uid && before.st_gid == after.st_gid);
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_run "$LINENO"; then :
+  ac_cv_func_chown_works=yes
+else
+  ac_cv_func_chown_works=no
+fi
+rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
+  conftest.$ac_objext conftest.beam conftest.$ac_ext
+fi
+
+rm -f conftest.chown
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_chown_works" >&5
+$as_echo "$ac_cv_func_chown_works" >&6; }
+if test $ac_cv_func_chown_works = yes; then
+
+$as_echo "#define HAVE_CHOWN 1" >>confdefs.h
+
+fi
+
+for ac_func in strerror memset mkdir rmdir strdup
+do :
+  as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
+ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
+eval as_val=\$$as_ac_var
+   if test "x$as_val" = x""yes; then :
+  cat >>confdefs.h <<_ACEOF
+#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
+_ACEOF
+
+fi
+done
+
+
+ac_config_files="$ac_config_files Makefile"
+
+cat >confcache <<\_ACEOF
+# This file is a shell script that caches the results of configure
+# tests run on this system so they can be shared between configure
+# scripts and configure runs, see configure's option --config-cache.
+# It is not useful on other systems.  If it contains results you don't
+# want to keep, you may remove or edit it.
+#
+# config.status only pays attention to the cache file if you give it
+# the --recheck option to rerun configure.
+#
+# `ac_cv_env_foo' variables (set or unset) will be overridden when
+# loading this file, other *unset* `ac_cv_foo' will be assigned the
+# following values.
+
+_ACEOF
+
+# The following way of writing the cache mishandles newlines in values,
+# but we know of no workaround that is simple, portable, and efficient.
+# So, we kill variables containing newlines.
+# Ultrix sh set writes to stderr and can't be redirected directly,
+# and sets the high bit in the cache file unless we assign to the vars.
+(
+  for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
+    eval ac_val=\$$ac_var
+    case $ac_val in #(
+    *${as_nl}*)
+      case $ac_var in #(
+      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+      esac
+      case $ac_var in #(
+      _ | IFS | as_nl) ;; #(
+      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
+      *) { eval $ac_var=; unset $ac_var;} ;;
+      esac ;;
+    esac
+  done
+
+  (set) 2>&1 |
+    case $as_nl`(ac_space=' '; set) 2>&1` in #(
+    *${as_nl}ac_space=\ *)
+      # `set' does not quote correctly, so add quotes: double-quote
+      # substitution turns \\\\ into \\, and sed turns \\ into \.
+      sed -n \
+	"s/'/'\\\\''/g;
+	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
+      ;; #(
+    *)
+      # `set' quotes correctly as required by POSIX, so do not add quotes.
+      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
+      ;;
+    esac |
+    sort
+) |
+  sed '
+     /^ac_cv_env_/b end
+     t clear
+     :clear
+     s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+     t end
+     s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+     :end' >>confcache
+if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+  if test -w "$cache_file"; then
+    test "x$cache_file" != "x/dev/null" &&
+      { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
+$as_echo "$as_me: updating cache $cache_file" >&6;}
+    cat confcache >$cache_file
+  else
+    { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
+$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+  fi
+fi
+rm -f confcache
+
+test "x$prefix" = xNONE && prefix=$ac_default_prefix
+# Let make expand exec_prefix.
+test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
+
+DEFS=-DHAVE_CONFIG_H
+
+ac_libobjs=
+ac_ltlibobjs=
+for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+  # 1. Remove the extension, and $U if already installed.
+  ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+  ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
+  # 2. Prepend LIBOBJDIR.  When used with automake>=1.10 LIBOBJDIR
+  #    will be set to the directory where LIBOBJS objects are built.
+  as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+  as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
+done
+LIBOBJS=$ac_libobjs
+
+LTLIBOBJS=$ac_ltlibobjs
+
+
+
+: ${CONFIG_STATUS=./config.status}
+ac_write_fail=0
+ac_clean_files_save=$ac_clean_files
+ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+as_write_fail=0
+cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
+#! $SHELL
+# Generated by $as_me.
+# Run this file to recreate the current configuration.
+# Compiler output produced by configure, useful for debugging
+# configure, is in config.log if it exists.
+
+debug=false
+ac_cs_recheck=false
+ac_cs_silent=false
+
+SHELL=\${CONFIG_SHELL-$SHELL}
+export SHELL
+_ASEOF
+cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+## -------------------- ##
+## M4sh Initialization. ##
+## -------------------- ##
+
+# Be more Bourne compatible
+DUALCASE=1; export DUALCASE # for MKS sh
+if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+  emulate sh
+  NULLCMD=:
+  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+  # is contrary to our usage.  Disable this feature.
+  alias -g '${1+"$@"}'='"$@"'
+  setopt NO_GLOB_SUBST
+else
+  case `(set -o) 2>/dev/null` in #(
+  *posix*) :
+    set -o posix ;; #(
+  *) :
+     ;;
+esac
+fi
+
+
+as_nl='
+'
+export as_nl
+# Printing a long string crashes Solaris 7 /usr/bin/printf.
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
+as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
+# Prefer a ksh shell builtin over an external printf program on Solaris,
+# but without wasting forks for bash or zsh.
+if test -z "$BASH_VERSION$ZSH_VERSION" \
+    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='print -r --'
+  as_echo_n='print -rn --'
+elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
+  as_echo='printf %s\n'
+  as_echo_n='printf %s'
+else
+  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
+    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
+    as_echo_n='/usr/ucb/echo -n'
+  else
+    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
+    as_echo_n_body='eval
+      arg=$1;
+      case $arg in #(
+      *"$as_nl"*)
+	expr "X$arg" : "X\\(.*\\)$as_nl";
+	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
+      esac;
+      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
+    '
+    export as_echo_n_body
+    as_echo_n='sh -c $as_echo_n_body as_echo'
+  fi
+  export as_echo_body
+  as_echo='sh -c $as_echo_body as_echo'
+fi
+
+# The user is always right.
+if test "${PATH_SEPARATOR+set}" != set; then
+  PATH_SEPARATOR=:
+  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+      PATH_SEPARATOR=';'
+  }
+fi
+
+
+# IFS
+# We need space, tab and new line, in precisely that order.  Quoting is
+# there to prevent editors from complaining about space-tab.
+# (If _AS_PATH_WALK were called with IFS unset, it would disable word
+# splitting by setting IFS to empty value.)
+IFS=" ""	$as_nl"
+
+# Find who we are.  Look in the path if we contain no directory separator.
+case $0 in #((
+  *[\\/]* ) as_myself=$0 ;;
+  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+  done
+IFS=$as_save_IFS
+
+     ;;
+esac
+# We did not find ourselves, most probably we were run as `sh COMMAND'
+# in which case we are not to be found in the path.
+if test "x$as_myself" = x; then
+  as_myself=$0
+fi
+if test ! -f "$as_myself"; then
+  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+  exit 1
+fi
+
+# Unset variables that we do not need and which cause bugs (e.g. in
+# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
+# suppresses any "Segmentation fault" message there.  '((' could
+# trigger a bug in pdksh 5.2.14.
+for as_var in BASH_ENV ENV MAIL MAILPATH
+do eval test x\${$as_var+set} = xset \
+  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+done
+PS1='$ '
+PS2='> '
+PS4='+ '
+
+# NLS nuisances.
+LC_ALL=C
+export LC_ALL
+LANGUAGE=C
+export LANGUAGE
+
+# CDPATH.
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+
+
+# as_fn_error ERROR [LINENO LOG_FD]
+# ---------------------------------
+# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
+# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
+# script with status $?, using 1 if that was 0.
+as_fn_error ()
+{
+  as_status=$?; test $as_status -eq 0 && as_status=1
+  if test "$3"; then
+    as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+    $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
+  fi
+  $as_echo "$as_me: error: $1" >&2
+  as_fn_exit $as_status
+} # as_fn_error
+
+
+# as_fn_set_status STATUS
+# -----------------------
+# Set $? to STATUS, without forking.
+as_fn_set_status ()
+{
+  return $1
+} # as_fn_set_status
+
+# as_fn_exit STATUS
+# -----------------
+# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
+as_fn_exit ()
+{
+  set +e
+  as_fn_set_status $1
+  exit $1
+} # as_fn_exit
+
+# as_fn_unset VAR
+# ---------------
+# Portably unset VAR.
+as_fn_unset ()
+{
+  { eval $1=; unset $1;}
+}
+as_unset=as_fn_unset
+# as_fn_append VAR VALUE
+# ----------------------
+# Append the text in VALUE to the end of the definition contained in VAR. Take
+# advantage of any shell optimizations that allow amortized linear growth over
+# repeated appends, instead of the typical quadratic growth present in naive
+# implementations.
+if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+  eval 'as_fn_append ()
+  {
+    eval $1+=\$2
+  }'
+else
+  as_fn_append ()
+  {
+    eval $1=\$$1\$2
+  }
+fi # as_fn_append
+
+# as_fn_arith ARG...
+# ------------------
+# Perform arithmetic evaluation on the ARGs, and store the result in the
+# global $as_val. Take advantage of shells that can avoid forks. The arguments
+# must be portable across $(()) and expr.
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+  eval 'as_fn_arith ()
+  {
+    as_val=$(( $* ))
+  }'
+else
+  as_fn_arith ()
+  {
+    as_val=`expr "$@" || test $? -eq 1`
+  }
+fi # as_fn_arith
+
+
+if expr a : '\(a\)' >/dev/null 2>&1 &&
+   test "X`expr 00001 : '.*\(...\)'`" = X001; then
+  as_expr=expr
+else
+  as_expr=false
+fi
+
+if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
+  as_basename=basename
+else
+  as_basename=false
+fi
+
+if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
+  as_dirname=dirname
+else
+  as_dirname=false
+fi
+
+as_me=`$as_basename -- "$0" ||
+$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+	 X"$0" : 'X\(//\)$' \| \
+	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X/"$0" |
+    sed '/^.*\/\([^/][^/]*\)\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\/\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+
+# Avoid depending upon Character Ranges.
+as_cr_letters='abcdefghijklmnopqrstuvwxyz'
+as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
+as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+as_cr_digits='0123456789'
+as_cr_alnum=$as_cr_Letters$as_cr_digits
+
+ECHO_C= ECHO_N= ECHO_T=
+case `echo -n x` in #(((((
+-n*)
+  case `echo 'xy\c'` in
+  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
+  xy)  ECHO_C='\c';;
+  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
+       ECHO_T='	';;
+  esac;;
+*)
+  ECHO_N='-n';;
+esac
+
+rm -f conf$$ conf$$.exe conf$$.file
+if test -d conf$$.dir; then
+  rm -f conf$$.dir/conf$$.file
+else
+  rm -f conf$$.dir
+  mkdir conf$$.dir 2>/dev/null
+fi
+if (echo >conf$$.file) 2>/dev/null; then
+  if ln -s conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s='ln -s'
+    # ... but there are two gotchas:
+    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
+    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
+    # In both cases, we have to default to `cp -p'.
+    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
+      as_ln_s='cp -p'
+  elif ln conf$$.file conf$$ 2>/dev/null; then
+    as_ln_s=ln
+  else
+    as_ln_s='cp -p'
+  fi
+else
+  as_ln_s='cp -p'
+fi
+rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
+rmdir conf$$.dir 2>/dev/null
+
+
+# as_fn_mkdir_p
+# -------------
+# Create "$as_dir" as a directory, including parents if necessary.
+as_fn_mkdir_p ()
+{
+
+  case $as_dir in #(
+  -*) as_dir=./$as_dir;;
+  esac
+  test -d "$as_dir" || eval $as_mkdir_p || {
+    as_dirs=
+    while :; do
+      case $as_dir in #(
+      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+      *) as_qdir=$as_dir;;
+      esac
+      as_dirs="'$as_qdir' $as_dirs"
+      as_dir=`$as_dirname -- "$as_dir" ||
+$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$as_dir" : 'X\(//\)[^/]' \| \
+	 X"$as_dir" : 'X\(//\)$' \| \
+	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$as_dir" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+      test -d "$as_dir" && break
+    done
+    test -z "$as_dirs" || eval "mkdir $as_dirs"
+  } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
+
+
+} # as_fn_mkdir_p
+if mkdir -p . 2>/dev/null; then
+  as_mkdir_p='mkdir -p "$as_dir"'
+else
+  test -d ./-p && rmdir ./-p
+  as_mkdir_p=false
+fi
+
+if test -x / >/dev/null 2>&1; then
+  as_test_x='test -x'
+else
+  if ls -dL / >/dev/null 2>&1; then
+    as_ls_L_option=L
+  else
+    as_ls_L_option=
+  fi
+  as_test_x='
+    eval sh -c '\''
+      if test -d "$1"; then
+	test -d "$1/.";
+      else
+	case $1 in #(
+	-*)set "./$1";;
+	esac;
+	case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
+	???[sx]*):;;*)false;;esac;fi
+    '\'' sh
+  '
+fi
+as_executable_p=$as_test_x
+
+# Sed expression to map a string onto a valid CPP name.
+as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
+
+# Sed expression to map a string onto a valid variable name.
+as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
+
+
+exec 6>&1
+## ----------------------------------- ##
+## Main body of $CONFIG_STATUS script. ##
+## ----------------------------------- ##
+_ASEOF
+test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# Save the log message, to keep $0 and so on meaningful, and to
+# report actual input values of CONFIG_FILES etc. instead of their
+# values after options handling.
+ac_log="
+This file was extended by runAs $as_me 0.1, which was
+generated by GNU Autoconf 2.65.  Invocation command line was
+
+  CONFIG_FILES    = $CONFIG_FILES
+  CONFIG_HEADERS  = $CONFIG_HEADERS
+  CONFIG_LINKS    = $CONFIG_LINKS
+  CONFIG_COMMANDS = $CONFIG_COMMANDS
+  $ $0 $@
+
+on `(hostname || uname -n) 2>/dev/null | sed 1q`
+"
+
+_ACEOF
+
+case $ac_config_files in *"
+"*) set x $ac_config_files; shift; ac_config_files=$*;;
+esac
+
+case $ac_config_headers in *"
+"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
+esac
+
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+# Files that config.status was made for.
+config_files="$ac_config_files"
+config_headers="$ac_config_headers"
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ac_cs_usage="\
+\`$as_me' instantiates files and other configuration actions
+from templates according to the current configuration.  Unless the files
+and actions are specified as TAGs, all are instantiated by default.
+
+Usage: $0 [OPTION]... [TAG]...
+
+  -h, --help       print this help, then exit
+  -V, --version    print version number and configuration settings, then exit
+      --config     print configuration, then exit
+  -q, --quiet, --silent
+                   do not print progress messages
+  -d, --debug      don't remove temporary files
+      --recheck    update $as_me by reconfiguring in the same conditions
+      --file=FILE[:TEMPLATE]
+                   instantiate the configuration file FILE
+      --header=FILE[:TEMPLATE]
+                   instantiate the configuration header FILE
+
+Configuration files:
+$config_files
+
+Configuration headers:
+$config_headers
+
+Report bugs to the package provider."
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
+ac_cs_version="\\
+runAs config.status 0.1
+configured by $0, generated by GNU Autoconf 2.65,
+  with options \\"\$ac_cs_config\\"
+
+Copyright (C) 2009 Free Software Foundation, Inc.
+This config.status script is free software; the Free Software Foundation
+gives unlimited permission to copy, distribute and modify it."
+
+ac_pwd='$ac_pwd'
+srcdir='$srcdir'
+test -n "\$AWK" || AWK=awk
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# The default lists apply if the user does not specify any file.
+ac_need_defaults=:
+while test $# != 0
+do
+  case $1 in
+  --*=*)
+    ac_option=`expr "X$1" : 'X\([^=]*\)='`
+    ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
+    ac_shift=:
+    ;;
+  *)
+    ac_option=$1
+    ac_optarg=$2
+    ac_shift=shift
+    ;;
+  esac
+
+  case $ac_option in
+  # Handling of the options.
+  -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+    ac_cs_recheck=: ;;
+  --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+    $as_echo "$ac_cs_version"; exit ;;
+  --config | --confi | --conf | --con | --co | --c )
+    $as_echo "$ac_cs_config"; exit ;;
+  --debug | --debu | --deb | --de | --d | -d )
+    debug=: ;;
+  --file | --fil | --fi | --f )
+    $ac_shift
+    case $ac_optarg in
+    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+    esac
+    as_fn_append CONFIG_FILES " '$ac_optarg'"
+    ac_need_defaults=false;;
+  --header | --heade | --head | --hea )
+    $ac_shift
+    case $ac_optarg in
+    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+    esac
+    as_fn_append CONFIG_HEADERS " '$ac_optarg'"
+    ac_need_defaults=false;;
+  --he | --h)
+    # Conflict between --help and --header
+    as_fn_error "ambiguous option: \`$1'
+Try \`$0 --help' for more information.";;
+  --help | --hel | -h )
+    $as_echo "$ac_cs_usage"; exit ;;
+  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+  | -silent | --silent | --silen | --sile | --sil | --si | --s)
+    ac_cs_silent=: ;;
+
+  # This is an error.
+  -*) as_fn_error "unrecognized option: \`$1'
+Try \`$0 --help' for more information." ;;
+
+  *) as_fn_append ac_config_targets " $1"
+     ac_need_defaults=false ;;
+
+  esac
+  shift
+done
+
+ac_configure_extra_args=
+
+if $ac_cs_silent; then
+  exec 6>/dev/null
+  ac_configure_extra_args="$ac_configure_extra_args --silent"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+if \$ac_cs_recheck; then
+  set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+  shift
+  \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+  CONFIG_SHELL='$SHELL'
+  export CONFIG_SHELL
+  exec "\$@"
+fi
+
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+exec 5>>config.log
+{
+  echo
+  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+## Running $as_me. ##
+_ASBOX
+  $as_echo "$ac_log"
+} >&5
+
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+
+# Handling of arguments.
+for ac_config_target in $ac_config_targets
+do
+  case $ac_config_target in
+    "runAs.h") CONFIG_HEADERS="$CONFIG_HEADERS runAs.h" ;;
+    "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
+
+  *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
+  esac
+done
+
+
+# If the user did not use the arguments to specify the items to instantiate,
+# then the envvar interface is used.  Set only those that are not.
+# We use the long form for the default assignment because of an extremely
+# bizarre bug on SunOS 4.1.3.
+if $ac_need_defaults; then
+  test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
+  test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
+fi
+
+# Have a temporary directory for convenience.  Make it in the build tree
+# simply because there is no reason against having it here, and in addition,
+# creating and moving files from /tmp can sometimes cause problems.
+# Hook for its removal unless debugging.
+# Note that there is a small window in which the directory will not be cleaned:
+# after its creation but before its name has been assigned to `$tmp'.
+$debug ||
+{
+  tmp=
+  trap 'exit_status=$?
+  { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
+' 0
+  trap 'as_fn_exit 1' 1 2 13 15
+}
+# Create a (secure) tmp directory for tmp files.
+
+{
+  tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
+  test -n "$tmp" && test -d "$tmp"
+}  ||
+{
+  tmp=./conf$$-$RANDOM
+  (umask 077 && mkdir "$tmp")
+} || as_fn_error "cannot create a temporary directory in ." "$LINENO" 5
+
+# Set up the scripts for CONFIG_FILES section.
+# No need to generate them if there are no CONFIG_FILES.
+# This happens for instance with `./config.status config.h'.
+if test -n "$CONFIG_FILES"; then
+
+
+ac_cr=`echo X | tr X '\015'`
+# On cygwin, bash can eat \r inside `` if the user requested igncr.
+# But we know of no other shell where ac_cr would be empty at this
+# point, so we can use a bashism as a fallback.
+if test "x$ac_cr" = x; then
+  eval ac_cr=\$\'\\r\'
+fi
+ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
+if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
+  ac_cs_awk_cr='\r'
+else
+  ac_cs_awk_cr=$ac_cr
+fi
+
+echo 'BEGIN {' >"$tmp/subs1.awk" &&
+_ACEOF
+
+
+{
+  echo "cat >conf$$subs.awk <<_ACEOF" &&
+  echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
+  echo "_ACEOF"
+} >conf$$subs.sh ||
+  as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
+ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'`
+ac_delim='%!_!# '
+for ac_last_try in false false false false false :; do
+  . ./conf$$subs.sh ||
+    as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
+
+  ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
+  if test $ac_delim_n = $ac_delim_num; then
+    break
+  elif $ac_last_try; then
+    as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
+  else
+    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+  fi
+done
+rm -f conf$$subs.sh
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+cat >>"\$tmp/subs1.awk" <<\\_ACAWK &&
+_ACEOF
+sed -n '
+h
+s/^/S["/; s/!.*/"]=/
+p
+g
+s/^[^!]*!//
+:repl
+t repl
+s/'"$ac_delim"'$//
+t delim
+:nl
+h
+s/\(.\{148\}\)..*/\1/
+t more1
+s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
+p
+n
+b repl
+:more1
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t nl
+:delim
+h
+s/\(.\{148\}\)..*/\1/
+t more2
+s/["\\]/\\&/g; s/^/"/; s/$/"/
+p
+b
+:more2
+s/["\\]/\\&/g; s/^/"/; s/$/"\\/
+p
+g
+s/.\{148\}//
+t delim
+' <conf$$subs.awk | sed '
+/^[^""]/{
+  N
+  s/\n//
+}
+' >>$CONFIG_STATUS || ac_write_fail=1
+rm -f conf$$subs.awk
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+_ACAWK
+cat >>"\$tmp/subs1.awk" <<_ACAWK &&
+  for (key in S) S_is_set[key] = 1
+  FS = ""
+
+}
+{
+  line = $ 0
+  nfields = split(line, field, "@")
+  substed = 0
+  len = length(field[1])
+  for (i = 2; i < nfields; i++) {
+    key = field[i]
+    keylen = length(key)
+    if (S_is_set[key]) {
+      value = S[key]
+      line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
+      len += length(value) + length(field[++i])
+      substed = 1
+    } else
+      len += 1 + keylen
+  }
+
+  print line
+}
+
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
+  sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
+else
+  cat
+fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
+  || as_fn_error "could not setup config files machinery" "$LINENO" 5
+_ACEOF
+
+# VPATH may cause trouble with some makes, so we remove $(srcdir),
+# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
+# trailing colons and then remove the whole line if VPATH becomes empty
+# (actually we leave an empty line to preserve line numbers).
+if test "x$srcdir" = x.; then
+  ac_vpsub='/^[	 ]*VPATH[	 ]*=/{
+s/:*\$(srcdir):*/:/
+s/:*\${srcdir}:*/:/
+s/:*@srcdir@:*/:/
+s/^\([^=]*=[	 ]*\):*/\1/
+s/:*$//
+s/^[^=]*=[	 ]*$//
+}'
+fi
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+fi # test -n "$CONFIG_FILES"
+
+# Set up the scripts for CONFIG_HEADERS section.
+# No need to generate them if there are no CONFIG_HEADERS.
+# This happens for instance with `./config.status Makefile'.
+if test -n "$CONFIG_HEADERS"; then
+cat >"$tmp/defines.awk" <<\_ACAWK ||
+BEGIN {
+_ACEOF
+
+# Transform confdefs.h into an awk script `defines.awk', embedded as
+# here-document in config.status, that substitutes the proper values into
+# config.h.in to produce config.h.
+
+# Create a delimiter string that does not exist in confdefs.h, to ease
+# handling of long lines.
+ac_delim='%!_!# '
+for ac_last_try in false false :; do
+  ac_t=`sed -n "/$ac_delim/p" confdefs.h`
+  if test -z "$ac_t"; then
+    break
+  elif $ac_last_try; then
+    as_fn_error "could not make $CONFIG_HEADERS" "$LINENO" 5
+  else
+    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
+  fi
+done
+
+# For the awk script, D is an array of macro values keyed by name,
+# likewise P contains macro parameters if any.  Preserve backslash
+# newline sequences.
+
+ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
+sed -n '
+s/.\{148\}/&'"$ac_delim"'/g
+t rset
+:rset
+s/^[	 ]*#[	 ]*define[	 ][	 ]*/ /
+t def
+d
+:def
+s/\\$//
+t bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3"/p
+s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2"/p
+d
+:bsnl
+s/["\\]/\\&/g
+s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
+D["\1"]=" \3\\\\\\n"\\/p
+t cont
+s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
+t cont
+d
+:cont
+n
+s/.\{148\}/&'"$ac_delim"'/g
+t clear
+:clear
+s/\\$//
+t bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/"/p
+d
+:bsnlc
+s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
+b cont
+' <confdefs.h | sed '
+s/'"$ac_delim"'/"\\\
+"/g' >>$CONFIG_STATUS || ac_write_fail=1
+
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+  for (key in D) D_is_set[key] = 1
+  FS = ""
+}
+/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
+  line = \$ 0
+  split(line, arg, " ")
+  if (arg[1] == "#") {
+    defundef = arg[2]
+    mac1 = arg[3]
+  } else {
+    defundef = substr(arg[1], 2)
+    mac1 = arg[2]
+  }
+  split(mac1, mac2, "(") #)
+  macro = mac2[1]
+  prefix = substr(line, 1, index(line, defundef) - 1)
+  if (D_is_set[macro]) {
+    # Preserve the white space surrounding the "#".
+    print prefix "define", macro P[macro] D[macro]
+    next
+  } else {
+    # Replace #undef with comments.  This is necessary, for example,
+    # in the case of _POSIX_SOURCE, which is predefined and required
+    # on some systems where configure will not decide to define it.
+    if (defundef == "undef") {
+      print "/*", prefix defundef, macro, "*/"
+      next
+    }
+  }
+}
+{ print }
+_ACAWK
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+  as_fn_error "could not setup config headers machinery" "$LINENO" 5
+fi # test -n "$CONFIG_HEADERS"
+
+
+eval set X "  :F $CONFIG_FILES  :H $CONFIG_HEADERS    "
+shift
+for ac_tag
+do
+  case $ac_tag in
+  :[FHLC]) ac_mode=$ac_tag; continue;;
+  esac
+  case $ac_mode$ac_tag in
+  :[FHL]*:*);;
+  :L* | :C*:*) as_fn_error "invalid tag \`$ac_tag'" "$LINENO" 5;;
+  :[FH]-) ac_tag=-:-;;
+  :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
+  esac
+  ac_save_IFS=$IFS
+  IFS=:
+  set x $ac_tag
+  IFS=$ac_save_IFS
+  shift
+  ac_file=$1
+  shift
+
+  case $ac_mode in
+  :L) ac_source=$1;;
+  :[FH])
+    ac_file_inputs=
+    for ac_f
+    do
+      case $ac_f in
+      -) ac_f="$tmp/stdin";;
+      *) # Look for the file first in the build tree, then in the source tree
+	 # (if the path is not absolute).  The absolute path cannot be DOS-style,
+	 # because $ac_f cannot contain `:'.
+	 test -f "$ac_f" ||
+	   case $ac_f in
+	   [\\/$]*) false;;
+	   *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
+	   esac ||
+	   as_fn_error "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+      esac
+      case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+      as_fn_append ac_file_inputs " '$ac_f'"
+    done
+
+    # Let's still pretend it is `configure' which instantiates (i.e., don't
+    # use $as_me), people would be surprised to read:
+    #    /* config.h.  Generated by config.status.  */
+    configure_input='Generated from '`
+	  $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+	`' by configure.'
+    if test x"$ac_file" != x-; then
+      configure_input="$ac_file.  $configure_input"
+      { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+$as_echo "$as_me: creating $ac_file" >&6;}
+    fi
+    # Neutralize special characters interpreted by sed in replacement strings.
+    case $configure_input in #(
+    *\&* | *\|* | *\\* )
+       ac_sed_conf_input=`$as_echo "$configure_input" |
+       sed 's/[\\\\&|]/\\\\&/g'`;; #(
+    *) ac_sed_conf_input=$configure_input;;
+    esac
+
+    case $ac_tag in
+    *:-:* | *:-) cat >"$tmp/stdin" \
+      || as_fn_error "could not create $ac_file" "$LINENO" 5 ;;
+    esac
+    ;;
+  esac
+
+  ac_dir=`$as_dirname -- "$ac_file" ||
+$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+	 X"$ac_file" : 'X\(//\)[^/]' \| \
+	 X"$ac_file" : 'X\(//\)$' \| \
+	 X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+$as_echo X"$ac_file" |
+    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)[^/].*/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\/\)$/{
+	    s//\1/
+	    q
+	  }
+	  /^X\(\/\).*/{
+	    s//\1/
+	    q
+	  }
+	  s/.*/./; q'`
+  as_dir="$ac_dir"; as_fn_mkdir_p
+  ac_builddir=.
+
+case "$ac_dir" in
+.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+*)
+  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+  # A ".." for each directory in $ac_dir_suffix.
+  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+  case $ac_top_builddir_sub in
+  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+  esac ;;
+esac
+ac_abs_top_builddir=$ac_pwd
+ac_abs_builddir=$ac_pwd$ac_dir_suffix
+# for backward compatibility:
+ac_top_builddir=$ac_top_build_prefix
+
+case $srcdir in
+  .)  # We are building in place.
+    ac_srcdir=.
+    ac_top_srcdir=$ac_top_builddir_sub
+    ac_abs_top_srcdir=$ac_pwd ;;
+  [\\/]* | ?:[\\/]* )  # Absolute name.
+    ac_srcdir=$srcdir$ac_dir_suffix;
+    ac_top_srcdir=$srcdir
+    ac_abs_top_srcdir=$srcdir ;;
+  *) # Relative name.
+    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
+    ac_top_srcdir=$ac_top_build_prefix$srcdir
+    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
+esac
+ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+
+
+  case $ac_mode in
+  :F)
+  #
+  # CONFIG_FILE
+  #
+
+_ACEOF
+
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+# If the template does not know about datarootdir, expand it.
+# FIXME: This hack should be removed a few years after 2.60.
+ac_datarootdir_hack=; ac_datarootdir_seen=
+ac_sed_dataroot='
+/datarootdir/ {
+  p
+  q
+}
+/@datadir@/p
+/@docdir@/p
+/@infodir@/p
+/@localedir@/p
+/@mandir@/p'
+case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+*datarootdir*) ac_datarootdir_seen=yes;;
+*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+_ACEOF
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+  ac_datarootdir_hack='
+  s&@datadir@&$datadir&g
+  s&@docdir@&$docdir&g
+  s&@infodir@&$infodir&g
+  s&@localedir@&$localedir&g
+  s&@mandir@&$mandir&g
+  s&\\\${datarootdir}&$datarootdir&g' ;;
+esac
+_ACEOF
+
+# Neutralize VPATH when `$srcdir' = `.'.
+# Shell code in configure.ac might set extrasub.
+# FIXME: do we really want to maintain this feature?
+cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ac_sed_extra="$ac_vpsub
+$extrasub
+_ACEOF
+cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+:t
+/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
+s|@configure_input@|$ac_sed_conf_input|;t t
+s&@top_builddir@&$ac_top_builddir_sub&;t t
+s&@top_build_prefix@&$ac_top_build_prefix&;t t
+s&@srcdir@&$ac_srcdir&;t t
+s&@abs_srcdir@&$ac_abs_srcdir&;t t
+s&@top_srcdir@&$ac_top_srcdir&;t t
+s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
+s&@builddir@&$ac_builddir&;t t
+s&@abs_builddir@&$ac_abs_builddir&;t t
+s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
+$ac_datarootdir_hack
+"
+eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
+  || as_fn_error "could not create $ac_file" "$LINENO" 5
+
+test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+  { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
+  { ac_out=`sed -n '/^[	 ]*datarootdir[	 ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined.  Please make sure it is defined." >&5
+$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+which seems to be undefined.  Please make sure it is defined." >&2;}
+
+  rm -f "$tmp/stdin"
+  case $ac_file in
+  -) cat "$tmp/out" && rm -f "$tmp/out";;
+  *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
+  esac \
+  || as_fn_error "could not create $ac_file" "$LINENO" 5
+ ;;
+  :H)
+  #
+  # CONFIG_HEADER
+  #
+  if test x"$ac_file" != x-; then
+    {
+      $as_echo "/* $configure_input  */" \
+      && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs"
+    } >"$tmp/config.h" \
+      || as_fn_error "could not create $ac_file" "$LINENO" 5
+    if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then
+      { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
+$as_echo "$as_me: $ac_file is unchanged" >&6;}
+    else
+      rm -f "$ac_file"
+      mv "$tmp/config.h" "$ac_file" \
+	|| as_fn_error "could not create $ac_file" "$LINENO" 5
+    fi
+  else
+    $as_echo "/* $configure_input  */" \
+      && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \
+      || as_fn_error "could not create -" "$LINENO" 5
+  fi
+ ;;
+
+
+  esac
+
+done # for ac_tag
+
+
+as_fn_exit 0
+_ACEOF
+ac_clean_files=$ac_clean_files_save
+
+test $ac_write_fail = 0 ||
+  as_fn_error "write failure creating $CONFIG_STATUS" "$LINENO" 5
+
+
+# configure is writing to config.log, and then calls config.status.
+# config.status does its own redirection, appending to config.log.
+# Unfortunately, on DOS this fails, as config.log is still kept open
+# by configure, so config.status won't be able to write to it; its
+# output is simply discarded.  So we exec the FD to /dev/null,
+# effectively closing config.log, so it can be properly (re)opened and
+# appended to by config.status.  When coming back to configure, we
+# need to make the FD available again.
+if test "$no_create" != yes; then
+  ac_cs_success=:
+  ac_config_status_args=
+  test "$silent" = yes &&
+    ac_config_status_args="$ac_config_status_args --quiet"
+  exec 5>/dev/null
+  $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
+  exec 5>>config.log
+  # Use ||, not &&, to avoid exiting from the if with $? = 1, which
+  # would make configure fail if this is the last instruction.
+  $ac_cs_success || as_fn_exit $?
+fi
+if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+fi
+
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5
+$as_echo_n "checking for stdbool.h that conforms to C99... " >&6; }
+if test "${ac_cv_header_stdbool_h+set}" = set; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+#include <stdbool.h>
+#ifndef bool
+ "error: bool is not defined"
+#endif
+#ifndef false
+ "error: false is not defined"
+#endif
+#if false
+ "error: false is not 0"
+#endif
+#ifndef true
+ "error: true is not defined"
+#endif
+#if true != 1
+ "error: true is not 1"
+#endif
+#ifndef __bool_true_false_are_defined
+ "error: __bool_true_false_are_defined is not defined"
+#endif
+
+	struct s { _Bool s: 1; _Bool t; } s;
+
+	char a[true == 1 ? 1 : -1];
+	char b[false == 0 ? 1 : -1];
+	char c[__bool_true_false_are_defined == 1 ? 1 : -1];
+	char d[(bool) 0.5 == true ? 1 : -1];
+	bool e = &s;
+	char f[(_Bool) 0.0 == false ? 1 : -1];
+	char g[true];
+	char h[sizeof (_Bool)];
+	char i[sizeof s.t];
+	enum { j = false, k = true, l = false * true, m = true * 256 };
+	/* The following fails for
+	   HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */
+	_Bool n[m];
+	char o[sizeof n == m * sizeof n[0] ? 1 : -1];
+	char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1];
+#	if defined __xlc__ || defined __GNUC__
+	 /* Catch a bug in IBM AIX xlc compiler version 6.0.0.0
+	    reported by James Lemley on 2005-10-05; see
+	    http://lists.gnu.org/archive/html/bug-coreutils/2005-10/msg00086.html
+	    This test is not quite right, since xlc is allowed to
+	    reject this program, as the initializer for xlcbug is
+	    not one of the forms that C requires support for.
+	    However, doing the test right would require a runtime
+	    test, and that would make cross-compilation harder.
+	    Let us hope that IBM fixes the xlc bug, and also adds
+	    support for this kind of constant expression.  In the
+	    meantime, this test will reject xlc, which is OK, since
+	    our stdbool.h substitute should suffice.  We also test
+	    this with GCC, where it should work, to detect more
+	    quickly whether someone messes up the test in the
+	    future.  */
+	 char digs[] = "0123456789";
+	 int xlcbug = 1 / (&(digs + 5)[-2 + (bool) 1] == &digs[4] ? 1 : -1);
+#	endif
+	/* Catch a bug in an HP-UX C compiler.  See
+	   http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html
+	   http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html
+	 */
+	_Bool q = true;
+	_Bool *pq = &q;
+
+int
+main ()
+{
+
+	*pq |= q;
+	*pq |= ! q;
+	/* Refer to every declared value, to avoid compiler optimizations.  */
+	return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l
+		+ !m + !n + !o + !p + !q + !pq);
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  ac_cv_header_stdbool_h=yes
+else
+  ac_cv_header_stdbool_h=no
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5
+$as_echo "$ac_cv_header_stdbool_h" >&6; }
+ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default"
+if test "x$ac_cv_type__Bool" = x""yes; then :
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE__BOOL 1
+_ACEOF
+
+
+fi
+
+if test $ac_cv_header_stdbool_h = yes; then
+
+$as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h
+
+fi
+
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
+$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
+set x ${MAKE-make}
+ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
+if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat >conftest.make <<\_ACEOF
+SHELL = /bin/sh
+all:
+	@echo '@@@%%%=$(MAKE)=@@@%%%'
+_ACEOF
+# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
+case `${MAKE-make} -f conftest.make 2>/dev/null` in
+  *@@@%%%=?*=@@@%%%*)
+    eval ac_cv_prog_make_${ac_make}_set=yes;;
+  *)
+    eval ac_cv_prog_make_${ac_make}_set=no;;
+esac
+rm -f conftest.make
+fi
+if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+$as_echo "yes" >&6; }
+  SET_MAKE=
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+  SET_MAKE="MAKE=${MAKE-make}"
+fi
+

+ 65 - 0
src/test/system/c++/runAs/configure.ac

@@ -0,0 +1,65 @@
+#                                               -*- Autoconf -*-
+# Process this file with autoconf to produce a configure script.
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+AC_PREREQ(2.59)
+AC_INIT([runAs],[0.1])
+
+#changing the default prefix value to the current directory, so that the
+#binary does not get installed within the system
+AC_PREFIX_DEFAULT(.)
+
+#add a new argument: --with-home
+AC_ARG_WITH(home,[--with-home path to hadoop home dir])
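+# Example usage (the path below is a hypothetical placeholder):
+#   ./configure --with-home=/opt/hadoop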
+AC_CONFIG_SRCDIR([main.c])
+AC_CONFIG_HEADER([runAs.h])
+
+# Checks for programs.
+AC_PROG_CC
+
+# Checks for libraries.
+
+# Checks for header files.
+AC_HEADER_STDC
+AC_CHECK_HEADERS([stdlib.h string.h unistd.h fcntl.h])
+
+#check for HADOOP_HOME
+if test "$with_home" != ""
+then
+AC_DEFINE_UNQUOTED(HADOOP_HOME,"$with_home")
+fi
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_TYPE_PID_T
+AC_TYPE_MODE_T
+AC_TYPE_SIZE_T
+
+# Checks for library functions.
+AC_FUNC_MALLOC
+AC_FUNC_REALLOC
+AC_FUNC_CHOWN
+AC_CHECK_FUNCS([strerror memset mkdir rmdir strdup])
+
+AC_CONFIG_FILES([Makefile])
+AC_OUTPUT
+
+AC_HEADER_STDBOOL
+AC_PROG_MAKE_SET

+ 59 - 0
src/test/system/c++/runAs/main.c

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runAs.h"
+
+/**
+ * The binary accepts a command of the following format:
+ * cluster-controller user hostname hadoop-daemon.sh-command
+ */
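+/*
+ * A hypothetical example invocation (user, host, and daemon command are
+ * placeholders only):
+ *   runAs mapred tt-host.example.com "stop tasktracker"
+ */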
+int main(int argc, char **argv) {
+  int errorcode;
+  char *user;
+  char *hostname;
+  char *command;
+  struct passwd user_detail;
+  int i = 1;
+  /*
+   * Minimum number of arguments required for the binary to perform.
+   */
+  if (argc < 4) {
+    fprintf(stderr, "Invalid number of arguments passed to the binary\n");
+    return INVALID_ARGUMENT_NUMER;
+  }
+
+  user = argv[1];
+  if (user == NULL) {
+    fprintf(stderr, "Invalid user name\n");
+    return INVALID_USER_NAME;
+  }
+
+  if (getuserdetail(user, &user_detail) != 0) {
+    fprintf(stderr, "Invalid user name\n");
+    return INVALID_USER_NAME;
+  }
+
+  if (user_detail.pw_gid == 0 || user_detail.pw_uid == 0) {
+      fprintf(stderr, "Cannot run tasks as super user\n");
+      return SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS;
+  }
+
+  hostname = argv[2];
+  command = argv[3];
+  return process_controller_command(user, hostname, command);
+}

+ 111 - 0
src/test/system/c++/runAs/runAs.c

@@ -0,0 +1,111 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "runAs.h"
+
+/*
+ * Function to get the user details populated given a user name. 
+ */
+int getuserdetail(char *user, struct passwd *user_detail) {
+  struct passwd *tempPwdPtr;
+  int size = sysconf(_SC_GETPW_R_SIZE_MAX);
+  char pwdbuffer[size];
+  if ((getpwnam_r(user, user_detail, pwdbuffer, size, &tempPwdPtr)) != 0) {
+    fprintf(stderr, "Invalid user provided to getpwnam\n");
+    return -1;
+  }
+  return 0;
+}
+
+/**
+ * Function to switch the user identity and set the appropriate 
+ * group control as the user specified in the argument.
+ */
+int switchuser(char *user) {
+  //populate the user details
+  struct passwd user_detail;
+  if ((getuserdetail(user, &user_detail)) != 0) {
+    return INVALID_USER_NAME;
+  }
+  //set the right supplementary groups for the user.
+  if (initgroups(user_detail.pw_name, user_detail.pw_gid) != 0) {
+    fprintf(stderr, "Init groups call for the user : %s failed\n",
+        user_detail.pw_name);
+    return INITGROUPS_FAILED;
+  }
+  errno = 0;
+  //switch the group.
+  setgid(user_detail.pw_gid);
+  if (errno != 0) {
+    fprintf(stderr, "Setgid for the user : %s failed\n", user_detail.pw_name);
+    return SETUID_OPER_FAILED;
+  }
+  errno = 0;
+  //switch the user
+  setuid(user_detail.pw_uid);
+  if (errno != 0) {
+    fprintf(stderr, "Setuid for the user : %s failed\n", user_detail.pw_name);
+    return SETUID_OPER_FAILED;
+  }
+  errno = 0;
+  //set the effective user id.
+  seteuid(user_detail.pw_uid);
+  if (errno != 0) {
+    fprintf(stderr, "Seteuid for the user : %s failed\n", user_detail.pw_name);
+    return SETUID_OPER_FAILED;
+  }
+  return 0;
+}
+
+/*
+ * Top level method which processes a cluster management
+ * command.
+ */
+int process_cluster_command(char * user,  char * node , char *command) {
+  char *finalcommandstr;
+  int len;
+  int errorcode = 0;
+  if (strncmp(command, "", strlen(command)) == 0) {
+    fprintf(stderr, "Invalid command passed\n");
+    return INVALID_COMMAND_PASSED;
+  }
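+  /* Note: STRLEN also counts the literal "%s" placeholders in
+   * SCRIPT_DIR_PATTERN, so len slightly overestimates the formatted length
+   * and the buffer allocated below is large enough for the final command. */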
+  len = STRLEN + strlen(command);
+  finalcommandstr = (char *) malloc((len + 1) * sizeof(char));
+  snprintf(finalcommandstr, len, SCRIPT_DIR_PATTERN, HADOOP_HOME,
+      command);
+  finalcommandstr[len] = '\0';
+  errorcode = switchuser(user);
+  if (errorcode != 0) {
+    fprintf(stderr, "switch user failed\n");
+    return errorcode;
+  }
+  errno = 0;
+  execlp(SSH_COMMAND, SSH_COMMAND, node, finalcommandstr, NULL);
+  if (errno != 0) {
+    fprintf(stderr, "Excelp failed dude to : %s\n", strerror(errno));
+  }
+  return 0;
+}
+
+/*
+ * Processes the cluster controller command; this is the API exposed to
+ * main in order to execute the cluster commands.
+ */
+int process_controller_command(char *user, char * node, char *command) {
+  return process_cluster_command(user, node, command);
+}

+ 59 - 0
src/test/system/c++/runAs/runAs.h.in

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <pwd.h>
+#include <assert.h>
+#include <getopt.h>
+#include <grp.h>
+
+/*
+* List of possible error codes.
+*/
+enum errorcodes {
+  INVALID_ARGUMENT_NUMER = 1,
+  INVALID_USER_NAME, //2
+  SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS, //3
+  INITGROUPS_FAILED, //4
+  SETUID_OPER_FAILED, //5
+  INVALID_COMMAND_PASSED, //6
+};
+
+#undef HADOOP_HOME
+
+#define SSH_COMMAND "ssh"
+
+#define SCRIPT_DIR_PATTERN "%s/bin/hadoop-daemon.sh %s" //%s to be substituted
+
+#define STRLEN strlen(SCRIPT_DIR_PATTERN) + strlen(HADOOP_HOME)
+
+/*
+ * Function to get the user details populated given a user name. 
+ */
+int getuserdetail(char *user, struct passwd *user_detail);
+
+ /*
+ * Processes the cluster controller command; this is the API exposed to
+ * main in order to execute the cluster commands.
+ */
+int process_controller_command(char *user, char *node, char *command);

+ 52 - 0
src/test/system/conf/hadoop-policy-system-test.xml

@@ -0,0 +1,52 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+<!--
+  These are Herriot-specific protocols. This section shouldn't be present in
+  a production cluster configuration. This file needs to be linked to the
+  main conf/hadoop-policy.xml during the deployment process.
+-->
+  <property>
+    <name>security.daemon.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DaemonProtocol, extended by all other
+    Herriot RPC protocols.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.nn.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NNProtocol, used by the
+    Herriot AbstractDaemonCluster's implementations to connect to a remote
+    NameNode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.dn.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DNProtocol, used by the
+    Herriot AbstractDaemonCluster's implementations to connect to a remote
+    DataNode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.tt.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TTProtocol, used by the
+    Herriot AbstractDaemonCluster's implementations to connect to a remote
+    TaskTracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For example, "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>

+ 128 - 0
src/test/system/conf/system-test.xml

@@ -0,0 +1,128 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+<!-- Mandatory properties that are to be set and uncommented before running the tests -->
+
+<!-- The $(YINST_ROOT) variable needs to be replaced with something
+corporate-neutral at the forward-port stage -->
+<property>
+  <name>test.system.hdrc.hadoophome</name>
+  <value>$(YINST_ROOT)/share/hadoop-current</value>
+  <description> This is the path to the home directory of the hadoop deployment.
+  </description>
+</property>
+<property>
+  <name>test.system.hdrc.hadoopconfdir</name>
+  <value>$(YINST_ROOT)/conf/hadoop</value>
+  <description> This is the path to the configuration directory of the hadoop
+  cluster that is deployed.
+  </description>
+</property>
+
+<property>
+  <name>test.system.hdrc.tt.hostfile</name>
+  <value>slaves.localcopy.txt</value>
+  <description> File name containing the hostnames where the TaskTrackers are running.
+  </description>
+</property>
+<property>
+  <name>test.system.hdrc.dn.hostfile</name>
+  <value>slaves.localcopy.txt</value>
+  <description> File name containing the hostnames where the DataNodes are running.
+  </description>
+</property>
+<property>
+  <name>test.system.mr.clusterprocess.impl.class</name>
+  <value>org.apache.hadoop.mapreduce.test.system.MRCluster$MRProcessManager</value>
+  <description>
+  Cluster process manager for the Mapreduce subsystem of the cluster. The value
+  org.apache.hadoop.mapreduce.test.system.MRCluster$MultiMRProcessManager can
+  be used to enable multi-user support.
+  </description>
+</property>
+<property>
+  <name>test.system.hdfs.clusterprocess.impl.class</name>
+  <value>org.apache.hadoop.hdfs.test.system.HDFSCluster$HDFSProcessManager</value>
+  <description>
+  Cluster process manager for the Hdfs subsystem of the cluster. The value
+  org.apache.hadoop.hdfs.test.system.HDFSCluster$MultiUserHDFSProcessManager can
+  be used to enable multi-user support.
+  </description>
+</property>
+<property>
+   <name>test.system.hdrc.deployed.scripts.dir</name>
+   <value>./src/test/system/scripts</value>
+   <description>
+     This directory hosts the scripts in the deployed location where
+     the system test client runs.
+   </description>
+</property>
+<property>
+  <name>test.system.hdrc.hadoopnewconfdir</name>
+  <value>$(TO_DO_GLOBAL_TMP_DIR)</value>
+  <description>
+  The directory to which the new config files will be copied on all
+  the cluster nodes.
+  </description>
+</property>
+
+<!-- Mandatory keys to be set for the multi user support to be enabled.  -->
+
+ <property>
+  <name>test.system.mr.clusterprocess.impl.class</name>
+  <value>org.apache.hadoop.mapreduce.test.system.MRCluster$MultiMRProcessManager</value>
+  <description>
+    Enables the multi-user based cluster process manager.
+  </description>
+</property>
+<property>
+  <name>test.system.hdfs.clusterprocess.impl.class</name>
+  <value>org.apache.hadoop.hdfs.test.system.HDFSCluster$MultiUserHDFSProcessManager</value>
+  <description>
+    Enables the multi-user based cluster process manager.
+  </description>
+</property>
+<property>
+  <name>test.system.hdrc.multi-user.binary.path</name>
+  <value>$(YINST_ROOT)/conf/hadoop/runAs</value>
+  <description>
+    Local file system path on the gateway to the cluster-controller binary, including the binary name.
+    To build the binary, the following commands need to be executed:
+     % ant run-as -Drun-as.hadoop.home.dir=(HADOOP_HOME of setup cluster)
+     % cp build-fi/system/c++-build/runAs $(test.system.hdrc.multi-user.binary.path)
+    The location of the binary is an important security precaution.
+    The binary should be owned by root, and group permissions should be set such
+    that the test user can execute it. Example usage would be:
+     % sudo chown root binary
+     % sudo chmod 6511 binary
+    Change permission appropriately to make it more secure.
+  </description>
+</property>
+<property>
+  <name>test.system.hdrc.multi-user.managinguser.namenode</name>
+  <value>hdfs</value>
+  <description>
+    The user for managing the particular daemon. Please note that these users should also
+    be present on the gateways. An example configuration for the above would be:
+    key name = test.system.hdrc.multi-user.managinguser.jobtracker
+    key value = guest
+    Please note the daemon names are all lower case, corresponding to the hadoop-daemon.sh command.
+  </description>
+</property>
+<property>
+  <name>test.system.hdrc.multi-user.managinguser.datanode</name>
+  <value>hdfs</value>
+</property>
+<property>
+  <name>test.system.hdrc.multi-user.managinguser.jobtracker</name>
+  <value>mapred</value>
+</property>
+<property>
+  <name>test.system.hdrc.multi-user.managinguser.tasktracker</name>
+  <value>mapred</value>
+</property>
+ 
+</configuration>

+ 69 - 0
src/test/system/java/org/apache/hadoop/hdfs/TestHL040.java

@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+import java.util.Collection;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.test.system.DNClient;
+import org.apache.hadoop.hdfs.test.system.HDFSCluster;
+import org.apache.hadoop.hdfs.test.system.NNClient;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestHL040 {
+  private HDFSCluster cluster = null;
+  private static final Log LOG = LogFactory.getLog(TestHL040.class);
+
+  public TestHL040() throws Exception {
+  }
+
+  @Before
+  public void setupUp() throws Exception {
+    cluster = HDFSCluster.createCluster(new Configuration());
+    cluster.setUp();
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    cluster.tearDown();
+  }
+
+  @Test
+  public void testConnect() throws IOException {
+    LOG.info("Staring TestHL040: connecting to the HDFSCluster ");
+    LOG.info("================ Getting namenode info ================");
+    NNClient dfsMaster = cluster.getNNClient();
+    LOG.info("Process info of namenode " + dfsMaster.getHostName() + " is: " +
+        dfsMaster.getProcessInfo());
+    LOG.info("================ Getting datanode info ================");
+    Collection<DNClient> clients = cluster.getDNClients();
+    for (DNClient dnC : clients) {
+      LOG.info("Process info of datanode " + dnC.getHostName() + " is: " +
+          dnC.getProcessInfo());
+      Assert.assertNotNull("Datanode process info isn't suppose to be null",
+          dnC.getProcessInfo());
+    }
+  }
+}

+ 82 - 0
src/test/system/java/org/apache/hadoop/hdfs/test/system/DNClient.java

@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.test.system;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.test.system.process.RemoteProcess;
+
+/**
+ * Datanode client for system tests. The class assumes that the configuration
+ * key {@code DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY} is set; only the port
+ * portion of the address is used.
+ */
+public class DNClient extends HDFSDaemonClient<DNProtocol> {
+
+  DNProtocol proxy;
+
+  public DNClient(Configuration conf, RemoteProcess process) throws IOException {
+    super(conf, process);
+  }
+
+  @Override
+  public void connect() throws IOException {
+    if (isConnected()) {
+      return;
+    }
+    String sockAddrStr = getConf().get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY);
+    if (sockAddrStr == null) {
+      throw new IllegalArgumentException("Datenode IPC address is not set."
+          + "Check if " + DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY
+          + " is configured.");
+    }
+    String[] splits = sockAddrStr.split(":");
+    if (splits.length != 2) {
+      throw new IllegalArgumentException(
+          "Datanode IPC address is not correctly configured");
+    }
+    String port = splits[1];
+    String sockAddr = getHostName() + ":" + port;
+    InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr);
+    proxy = (DNProtocol) RPC.getProxy(DNProtocol.class, DNProtocol.versionID,
+        bindAddr, getConf());
+    setConnected(true);
+  }
+
+  @Override
+  public void disconnect() throws IOException {
+    RPC.stopProxy(proxy);
+    setConnected(false);
+  }
+
+  @Override
+  protected DNProtocol getProxy() {
+    return proxy;
+  }
+
+  public Configuration getDatanodeConfig() throws IOException {
+    return getProxy().getDaemonConf();
+  }
+}

+ 36 - 0
src/test/system/java/org/apache/hadoop/hdfs/test/system/DNProtocol.java

@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.test.system;
+
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.test.system.DaemonProtocol;
+
+/**
+ * Client-side API exposed from the Datanode.
+ * Actual implementations are likely to be injected.
+ *
+ * The protocol has to be annotated so KerberosInfo can be filled in during
+ * creation of an ipc.Client connection.
+ */
+@KerberosInfo(
+    serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
+public interface DNProtocol extends DaemonProtocol {
+  public static final long versionID = 1L;
+}

+ 149 - 0
src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSCluster.java

@@ -0,0 +1,149 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.test.system;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Iterator;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.test.system.AbstractDaemonClient;
+import org.apache.hadoop.test.system.AbstractDaemonCluster;
+import org.apache.hadoop.test.system.process.ClusterProcessManager;
+import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;
+import org.apache.hadoop.test.system.process.MultiUserHadoopDaemonRemoteCluster;
+import org.apache.hadoop.test.system.process.RemoteProcess;
+import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
+
+public class HDFSCluster extends AbstractDaemonCluster {
+
+  static {
+    Configuration.addDefaultResource("hdfs-site.xml");
+  }
+
+  private static final Log LOG = LogFactory.getLog(HDFSCluster.class);
+  public static final String CLUSTER_PROCESS_MGR_IMPL =
+    "test.system.hdfs.clusterprocess.impl.class";
+
+  private HDFSCluster(Configuration conf, ClusterProcessManager rCluster)
+    throws IOException {
+    super(conf, rCluster);
+  }
+
+  /**
+   * Key used to point to the file containing the hostnames of the datanodes.
+   */
+  public static final String CONF_HADOOP_DN_HOSTFILE_NAME =
+    "test.system.hdrc.dn.hostfile";
+
+  private static List<HadoopDaemonInfo> hdfsDaemonInfos;
+
+  private static String nnHostName;
+  private static String DN_hostFileName;
+
+  protected enum Role {NN, DN}
+
+  @Override
+  protected AbstractDaemonClient
+    createClient(RemoteProcess process) throws IOException {
+    Enum<?> pRole = process.getRole();
+    if (Role.NN.equals(pRole)) {
+      return createNNClient(process);
+    } else if (Role.DN.equals(pRole)) {
+      return createDNClient(process);
+    } else throw new IOException("Role " + pRole +
+      " is not supported by HDFSCluster");
+  }
+
+  protected DNClient createDNClient(RemoteProcess dnDaemon) throws IOException {
+    return new DNClient(getConf(), dnDaemon);
+  }
+
+  protected NNClient createNNClient(RemoteProcess nnDaemon) throws IOException {
+    return new NNClient(getConf(), nnDaemon);
+  }
+
+  public NNClient getNNClient () {
+    Iterator<AbstractDaemonClient> iter = getDaemons().get(Role.NN).iterator();
+    return (NNClient) iter.next();
+  }
+
+  public List<DNClient> getDNClients () {
+    return (List) getDaemons().get(Role.DN);
+  }
+
+  public DNClient getDNClient (String hostname) {
+    for (DNClient dnC : getDNClients()) {
+      if (dnC.getHostName().equals(hostname))
+        return dnC;
+    }
+    return null;
+  }
+
+  public static class HDFSProcessManager extends HadoopDaemonRemoteCluster {
+    public HDFSProcessManager() {
+      super(hdfsDaemonInfos);
+    }
+  }
+
+  public static class MultiUserHDFSProcessManager
+      extends MultiUserHadoopDaemonRemoteCluster {
+    public MultiUserHDFSProcessManager() {
+      super(hdfsDaemonInfos);
+    }
+  }
+
+
+  public static HDFSCluster createCluster(Configuration conf) throws Exception {
+    conf.addResource("system-test.xml");
+    String sockAddrStr = FileSystem.getDefaultUri(conf).getAuthority();
+    if (sockAddrStr == null) {
+      throw new IllegalArgumentException("Namenode IPC address is not set");
+    }
+    String[] splits = sockAddrStr.split(":");
+    if (splits.length != 2) {
+      throw new IllegalArgumentException(
+          "Namenode report IPC is not correctly configured");
+    }
+    nnHostName = splits[0];
+    DN_hostFileName = conf.get(CONF_HADOOP_DN_HOSTFILE_NAME, "slaves");
+
+    hdfsDaemonInfos = new ArrayList<HadoopDaemonInfo>();
+    hdfsDaemonInfos.add(new HadoopDaemonInfo("namenode", 
+        Role.NN, Arrays.asList(new String[]{nnHostName})));
+    hdfsDaemonInfos.add(new HadoopDaemonInfo("datanode", 
+        Role.DN, DN_hostFileName));
+    
+    String implKlass = conf.get(CLUSTER_PROCESS_MGR_IMPL);
+    if (implKlass == null || implKlass.isEmpty()) {
+      implKlass = HDFSCluster.HDFSProcessManager.class.getName();
+    }
+    Class<ClusterProcessManager> klass =
+      (Class<ClusterProcessManager>) Class.forName(implKlass);
+    ClusterProcessManager clusterProcessMgr = klass.newInstance();
+    LOG.info("Created ClusterProcessManager as " + implKlass);
+    clusterProcessMgr.init(conf);
+    return new HDFSCluster(conf, clusterProcessMgr);
+  }
+}

+ 43 - 0
src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.test.system;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.test.system.AbstractDaemonClient;
+import org.apache.hadoop.test.system.DaemonProtocol;
+import org.apache.hadoop.test.system.process.RemoteProcess;
+
+public abstract class HDFSDaemonClient<PROXY extends DaemonProtocol>
+  extends AbstractDaemonClient<PROXY> {
+
+  public HDFSDaemonClient(Configuration conf, RemoteProcess process)
+      throws IOException {
+    super(conf, process);
+  }
+
+  public String[] getHDFSDataDirs() throws IOException {
+    return getProxy().getDaemonConf().getStrings("dfs.data.dir");
+  }
+
+  public String getHDFSNameDirs() throws IOException {
+    return getProxy().getDaemonConf().getStrings("dfs.name.dir")[0];
+  }
+}

+ 71 - 0
src/test/system/java/org/apache/hadoop/hdfs/test/system/NNClient.java

@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.test.system;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.test.system.process.RemoteProcess;
+
+public class NNClient extends HDFSDaemonClient<NNProtocol> {
+  
+  NNProtocol proxy;
+
+  public NNClient(Configuration conf, RemoteProcess process) throws IOException {
+    super(conf, process);
+  }
+
+  @Override
+  public void connect() throws IOException {
+    if (isConnected())
+      return;
+    String sockAddrStr = FileSystem.getDefaultUri(getConf()).getAuthority();
+    if (sockAddrStr == null) {
+      throw new IllegalArgumentException("Namenode IPC address is not set");
+    }
+    String[] splits = sockAddrStr.split(":");
+    if (splits.length != 2) {
+      throw new IllegalArgumentException(
+          "Namenode report IPC is not correctly configured");
+    }
+    String port = splits[1];
+    String sockAddr = getHostName() + ":" + port;
+
+    InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr);
+    proxy = (NNProtocol) RPC.getProxy(NNProtocol.class, NNProtocol.versionID,
+        bindAddr, getConf());
+    setConnected(true);
+  }
+
+  @Override
+  public void disconnect() throws IOException {
+    RPC.stopProxy(proxy);
+    setConnected(false);
+  }
+
+  @Override
+  protected NNProtocol getProxy() {
+    return proxy;
+  }
+}

+ 36 - 0
src/test/system/java/org/apache/hadoop/hdfs/test/system/NNProtocol.java

@@ -0,0 +1,36 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.test.system;
+
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.test.system.DaemonProtocol;
+
+/**
+ * Client-side API exposed from the Namenode.
+ * Actual implementations are likely to be injected.
+ *
+ * The protocol has to be annotated so KerberosInfo can be filled in during
+ * creation of an ipc.Client connection.
+ */
+@KerberosInfo(
+    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
+public interface NNProtocol extends DaemonProtocol {
+  public static final long versionID = 1L;
+}

+ 18 - 5
src/test/system/java/org/apache/hadoop/mapred/TTTaskInfoImpl.java

@@ -37,13 +37,14 @@ abstract class TTTaskInfoImpl implements TTTaskInfo {
   Configuration conf;
   String user;
   boolean isTaskCleanupTask;
+  private String pid;
 
   public TTTaskInfoImpl() {
   }
 
   public TTTaskInfoImpl(boolean slotTaken, boolean wasKilled,
       TaskStatus status, Configuration conf, String user,
-      boolean isTaskCleanupTask) {
+      boolean isTaskCleanupTask, String pid) {
     super();
     this.slotTaken = slotTaken;
     this.wasKilled = wasKilled;
@@ -51,6 +52,7 @@ abstract class TTTaskInfoImpl implements TTTaskInfo {
     this.conf = conf;
     this.user = user;
     this.isTaskCleanupTask = isTaskCleanupTask;
+    this.pid = pid;
   }
 
   @Override
@@ -81,6 +83,11 @@ abstract class TTTaskInfoImpl implements TTTaskInfo {
     return isTaskCleanupTask;
   }
   
+  @Override
+  public String getPid() {
+    return pid;
+  }
+  
   @Override
   public void readFields(DataInput in) throws IOException {
     slotTaken = in.readBoolean();
@@ -89,6 +96,7 @@ abstract class TTTaskInfoImpl implements TTTaskInfo {
     conf.readFields(in);
     user = in.readUTF();
     isTaskCleanupTask = in.readBoolean();
+    pid = in.readUTF();
   }
 
   @Override
@@ -98,6 +106,11 @@ abstract class TTTaskInfoImpl implements TTTaskInfo {
     conf.write(out);
     out.writeUTF(user);
     out.writeBoolean(isTaskCleanupTask);
+    if (pid != null) {
+      out.writeUTF(pid);
+    } else {
+      out.writeUTF("");
+    }
     status.write(out);
   }
 
@@ -109,8 +122,8 @@ abstract class TTTaskInfoImpl implements TTTaskInfo {
 
     public MapTTTaskInfo(boolean slotTaken, boolean wasKilled,
         MapTaskStatus status, Configuration conf, String user,
-        boolean isTaskCleanup) {
-      super(slotTaken, wasKilled, status, conf, user, isTaskCleanup);
+        boolean isTaskCleanup,String pid) {
+      super(slotTaken, wasKilled, status, conf, user, isTaskCleanup, pid);
     }
 
     @Override
@@ -133,8 +146,8 @@ abstract class TTTaskInfoImpl implements TTTaskInfo {
 
     public ReduceTTTaskInfo(boolean slotTaken, boolean wasKilled,
         ReduceTaskStatus status, Configuration conf, String user,
-        boolean isTaskCleanup) {
-      super(slotTaken, wasKilled, status, conf, user, isTaskCleanup);
+        boolean isTaskCleanup, String pid) {
+      super(slotTaken, wasKilled, status, conf, user, isTaskCleanup, pid);
     }
 
     @Override

+ 153 - 14
src/test/system/java/org/apache/hadoop/mapred/TestCluster.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapred;
 
+import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 
 import junit.framework.Assert;
@@ -39,6 +40,8 @@ import org.apache.hadoop.mapreduce.test.system.TTClient;
 import org.apache.hadoop.mapreduce.test.system.TTInfo;
 import org.apache.hadoop.mapreduce.test.system.TTTaskInfo;
 import org.apache.hadoop.mapreduce.test.system.TaskInfo;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.test.system.AbstractDaemonClient;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -86,22 +89,41 @@ public class TestCluster {
     cluster.getJTClient().verifyJobHistory(rJob.getID());
   }
 
-  @Test
+  //@Test
   public void testFileStatus() throws Exception {
-    JTClient jt = cluster.getJTClient();
-    String dir = ".";
-    checkFileStatus(jt.getFileStatus(dir, true));
-    checkFileStatus(jt.listStatus(dir, false, true), dir);
-    for (TTClient tt : cluster.getTTClients()) {
-      String[] localDirs = tt.getMapredLocalDirs();
-      for (String localDir : localDirs) {
-        checkFileStatus(tt.listStatus(localDir, true, false), localDir);
-        checkFileStatus(tt.listStatus(localDir, true, true), localDir);
+    UserGroupInformation ugi =
+        UserGroupInformation.createRemoteUser(cluster
+            .getJTClient().getProxy().getDaemonUser());
+    ugi.doAs(new PrivilegedExceptionAction<Void>() {
+      @Override
+      public Void run() throws Exception {
+        MRCluster myCluster = null;
+        try {
+          myCluster = MRCluster.createCluster(cluster.getConf());
+          myCluster.connect();
+          JTClient jt = myCluster.getJTClient();
+          String dir = ".";
+          checkFileStatus(jt.getFileStatus(dir, true));
+          checkFileStatus(jt.listStatus(dir, false, true), dir);
+          for (TTClient tt : myCluster.getTTClients()) {
+            String[] localDirs = tt.getMapredLocalDirs();
+            for (String localDir : localDirs) {
+              checkFileStatus(tt.listStatus(localDir, true, false), localDir);
+              checkFileStatus(tt.listStatus(localDir, true, true), localDir);
+            }
+          }
+          String systemDir = jt.getClient().getSystemDir().toString();
+          checkFileStatus(jt.listStatus(systemDir, false, true), systemDir);
+          checkFileStatus(jt.listStatus(jt.getLogDir(), true, true), jt
+              .getLogDir());
+        } finally {
+          if (myCluster != null) {
+            myCluster.disconnect();
+          }
+        }
+        return null;
       }
-    }
-    String systemDir = jt.getClient().getSystemDir().toString();
-    checkFileStatus(jt.listStatus(systemDir, false, true), systemDir);
-    checkFileStatus(jt.listStatus(jt.getLogDir(), true, true), jt.getLogDir());
+    });
   }
 
   private void checkFileStatus(FileStatus[] fs, String path) {
@@ -149,6 +171,10 @@ public class TestCluster {
     LOG.info("Waiting till job starts running one map");
 
     TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
+    boolean isOneTaskStored = false;
+    String sometaskpid = null;
+    org.apache.hadoop.mapreduce.TaskAttemptID sometaskId = null;
+    TTClient myCli = null;
     for(TaskInfo info : myTaskInfos) {
       if(!info.isSetupOrCleanup()) {
         String[] taskTrackers = info.getTaskTrackers();
@@ -162,6 +188,27 @@ public class TestCluster {
           Assert.assertNotNull(ttTaskInfo.getUser());
           Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() >= 0.0);
           Assert.assertTrue(ttTaskInfo.getTaskStatus().getProgress() <= 1.0);
+          //Get the pid of the task attempt. The task need not have 
+          //reported the pid of the task by the time we are checking
+          //the pid. So perform null check.
+          String pid = ttTaskInfo.getPid();
+          int i = 1;
+          while(pid.isEmpty()) {
+            Thread.sleep(1000);
+            LOG.info("Waiting for task to report its pid back");
+            ttTaskInfo = ttCli.getProxy().getTask(taskId);
+            pid = ttTaskInfo.getPid();
+            if(i == 40) {
+              Assert.fail("The task pid not reported for 40 seconds.");
+            }
+            i++;
+          }
+          if(!isOneTaskStored) {
+            sometaskpid = pid;
+            sometaskId = ttTaskInfo.getTaskStatus().getTaskID();
+            myCli = ttCli;
+            isOneTaskStored = true;
+          }
           LOG.info("verified task progress to be between 0 and 1");
           State state = ttTaskInfo.getTaskStatus().getRunState();
           if (ttTaskInfo.getTaskStatus().getProgress() < 1.0 &&
@@ -176,5 +223,97 @@ public class TestCluster {
       }
     }
     rJob.killJob();
+    int i = 1;
+    while (!rJob.isComplete()) {
+      Thread.sleep(1000);
+      if (i == 40) {
+        Assert
+            .fail("The job not completed within 40 seconds after killing it.");
+      }
+      i++;
+    }
+    TTTaskInfo myTaskInfo = myCli.getProxy().getTask(sometaskId.getTaskID());
+    i = 0;
+    while (myTaskInfo != null && !myTaskInfo.getPid().isEmpty()) {
+      LOG.info("sleeping till task is retired from TT memory");
+      Thread.sleep(1000);
+      myTaskInfo = myCli.getProxy().getTask(sometaskId.getTaskID());
+      if (i == 40) {
+        Assert
+            .fail("Task not retired from TT memory within 40 seconds of job completeing");
+      }
+      i++;
+    }
+    Assert.assertFalse(myCli.getProxy().isProcessTreeAlive(sometaskpid));
   }
+  
+  @Test
+  public void testClusterRestart() throws Exception {
+    cluster.stop();
+    // Give the cluster time to stop the whole cluster.
+    AbstractDaemonClient cli = cluster.getJTClient();
+    int i = 1;
+    while (i < 40) {
+      try {
+        cli.ping();
+        Thread.sleep(1000);
+        i++;
+      } catch (Exception e) {
+        break;
+      }
+    }
+    if (i >= 40) {
+      Assert.fail("JT on " + cli.getHostName() + " Should have been down.");
+    }
+    i = 1;
+    for (AbstractDaemonClient tcli : cluster.getTTClients()) {
+      i = 1;
+      while (i < 40) {
+        try {
+          tcli.ping();
+          Thread.sleep(1000);
+          i++;
+        } catch (Exception e) {
+          break;
+        }
+      }
+      if (i >= 40) {
+        Assert.fail("TT on " + tcli.getHostName() + " Should have been down.");
+      }
+    }
+    cluster.start();
+    cli = cluster.getJTClient();
+    i = 1;
+    while (i < 40) {
+      try {
+        cli.ping();
+        break;
+      } catch (Exception e) {
+        i++;
+        Thread.sleep(1000);
+        LOG.info("Waiting for Jobtracker on host : "
+            + cli.getHostName() + " to come up.");
+      }
+    }
+    if (i >= 40) {
+      Assert.fail("JT on " + cli.getHostName() + " Should have been up.");
+    }
+    for (AbstractDaemonClient tcli : cluster.getTTClients()) {
+      i = 1;
+      while (i < 40) {
+        try {
+          tcli.ping();
+          break;
+        } catch (Exception e) {
+          i++;
+          Thread.sleep(1000);
+          LOG.info("Waiting for Tasktracker on host : "
+              + tcli.getHostName() + " to come up.");
+        }
+      }
+      if (i >= 40) {
+        Assert.fail("TT on " + tcli.getHostName() + " Should have been Up.");
+      }
+    }
+  } 
 }
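
The wait loops added above (poll once a second, give up after 40 attempts) follow a pattern that recurs throughout these system tests. Below is a minimal, self-contained sketch of the same idea; the Condition interface and WaitUtil helper are hypothetical names, not part of this patch or of Hadoop itself.

    // Hypothetical helper illustrating the bounded-wait pattern used above.
    interface Condition {
      boolean holds() throws Exception;
    }

    final class WaitUtil {
      // Polls the condition once per second and fails the test if it
      // does not hold within maxSeconds.
      static void waitOrFail(Condition c, int maxSeconds, String failureMessage)
          throws Exception {
        for (int i = 0; i < maxSeconds; i++) {
          if (c.holds()) {
            return;
          }
          Thread.sleep(1000);
        }
        org.junit.Assert.fail(failureMessage);
      }
    }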

+ 15 - 4
src/test/system/java/org/apache/hadoop/mapred/TestControlledJob.java

@@ -98,11 +98,22 @@ public class TestControlledJob {
     }
     
     jInfo = wovenClient.getJobInfo(id);
-    while(!jInfo.getStatus().isJobComplete()) {
-      Thread.sleep(1000);
-      jInfo = wovenClient.getJobInfo(id);
+    int i = 1;
+    if (jInfo != null) {
+      while (!jInfo.getStatus().isJobComplete()) {
+        Thread.sleep(1000);
+        jInfo = wovenClient.getJobInfo(id);
+        if (jInfo == null) {
+          break;
+        }
+        if(i > 40) {
+          Assert.fail("Controlled Job with ID : "
+              + jInfo.getID()
+              + " has not completed in 40 seconds after signalling.");
+        }
+        i++;
+      }
     }
-    
     LOG.info("Job sucessfully completed after signalling!!!!");
   }
 }

+ 342 - 0
src/test/system/java/org/apache/hadoop/mapred/TestDistributedCacheModifiedFile.java

@@ -0,0 +1,342 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.DataOutputStream;
+import java.net.URI;
+import java.util.Collection;
+import java.util.ArrayList;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.mapreduce.test.system.JTProtocol;
+import org.apache.hadoop.mapreduce.test.system.TTClient;
+import org.apache.hadoop.mapreduce.test.system.JobInfo;
+import org.apache.hadoop.mapreduce.test.system.TaskInfo;
+import org.apache.hadoop.mapreduce.test.system.MRCluster;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.UtilsForTests;
+
+import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
+import org.apache.hadoop.filecache.DistributedCache;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.examples.SleepJob;
+
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import org.junit.Test;
+
+/**
+ * Verify the Distributed Cache functionality.
+ * This test scenario covers the behaviour of a distributed cache file
+ * that is modified between the runs of (at most) two jobs that access
+ * it. Once a job uses a distributed cache file, that file is stored in
+ * mapred.local.dir. If the next job uses the same file but with a
+ * different timestamp, the file is stored again. So, if two jobs choose
+ * the same tasktracker for their task execution, the distributed cache
+ * file should be found twice.
+ *
+ * This testcase runs a job with a distributed cache file. The handle of
+ * every tasktracker that ran one of the job's tasks is obtained and
+ * checked for the presence of the distributed cache file, with proper
+ * permissions, in the proper directory. When the job runs again and any
+ * of its tasks hits a tasktracker that ran a task of the previous job,
+ * the file should be uploaded again and the task should not use the old
+ * file. This is verified.
+*/
+
+public class TestDistributedCacheModifiedFile {
+
+  private static MRCluster cluster = null;
+  private static FileSystem dfs = null;
+  private static FileSystem ttFs = null;
+  private static JobClient client = null;
+  private static FsPermission permission = new FsPermission((short)00777);
+
+  private static String uriPath = "hdfs:///tmp/test.txt";
+  private static final Path URIPATH = new Path(uriPath);
+  private String distributedFileName = "test.txt";
+
+  static final Log LOG = LogFactory.
+                           getLog(TestDistributedCacheModifiedFile.class);
+
+  public TestDistributedCacheModifiedFile() throws Exception {
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    cluster = MRCluster.createCluster(new Configuration());
+    cluster.setUp();
+    client = cluster.getJTClient().getClient();
+    dfs = client.getFs();
+    //Deleting the file if it already exists
+    dfs.delete(URIPATH, true);
+
+    Collection<TTClient> tts = cluster.getTTClients();
+    //Stopping all TTs
+    for (TTClient tt : tts) {
+      tt.kill();
+    }
+    //Starting all TTs
+    for (TTClient tt : tts) {
+      tt.start();
+    }
+    //Waiting for 5 seconds to make sure tasktrackers are ready 
+    Thread.sleep(5000);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    cluster.tearDown();
+    dfs.delete(URIPATH, true);
+    
+    Collection<TTClient> tts = cluster.getTTClients();
+    //Stopping all TTs
+    for (TTClient tt : tts) {
+      tt.kill();
+    }
+    //Starting all TTs
+    for (TTClient tt : tts) {
+      tt.start();
+    }
+  }
+
+  @Test
+  /**
+   * This tests Distributed Cache for modified file
+   * @param none
+   * @return void
+   */
+  public void testDistributedCache() throws Exception {
+    Configuration conf = new Configuration(cluster.getConf());
+    JTProtocol wovenClient = cluster.getJTClient().getProxy();
+
+    //Guard counter that keeps the wait loop below from
+    //running forever.
+    int count = 0;
+    //This boolean will decide whether to run job again
+    boolean continueLoop = true;
+    //counter for job Loop
+    int countLoop = 0;
+    //This counter increases with all the tasktrackers in which tasks ran
+    int taskTrackerCounter = 0;
+    //This will store all the tasktrackers in which tasks ran
+    ArrayList<String> taskTrackerCollection = new ArrayList<String>();
+    //This boolean tells if two tasks ran on the same tasktracker or not
+    boolean taskTrackerFound = false;
+
+    do {
+      SleepJob job = new SleepJob();
+      job.setConf(conf);
+      conf = job.setupJobConf(5, 1, 1000, 1000, 100, 100);
+
+      //Before starting, modify the file
+      String input = "This will be the content of\n" + "distributed cache\n";
+      //Creating the path with the file
+      DataOutputStream file =
+          UtilsForTests.createTmpFileDFS(dfs, URIPATH, permission, input);
+
+      DistributedCache.createSymlink(conf);
+      URI uri = URI.create(uriPath);
+      DistributedCache.addCacheFile(uri, conf);
+      JobConf jconf = new JobConf(conf);
+
+      //Controls the job till all verification is done 
+      FinishTaskControlAction.configureControlActionForJob(conf);
+
+      //Submitting the job
+      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
+
+      //counter for job Loop
+      countLoop++;
+
+      TTClient tClient = null;
+      JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
+      LOG.info("jInfo is :" + jInfo);
+
+      //Assert if jobInfo is null
+      Assert.assertNotNull("jobInfo is null", jInfo);
+
+      //Wait for the job to start running.
+      count = 0;
+      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
+        UtilsForTests.waitFor(10000);
+        count++;
+        jInfo = wovenClient.getJobInfo(rJob.getID());
+        //If the count goes beyond a point, fail the test; this is to
+        //avoid an infinite loop under unforeseen circumstances.
+        if (count > 10) {
+          Assert.fail("job has not reached running state for more than" +
+            "100 seconds. Failing at this point");
+        }
+      }
+
+      LOG.info("job id is :" + rJob.getID().toString());
+
+      TaskInfo[] taskInfos = cluster.getJTClient().getProxy()
+             .getTaskInfo(rJob.getID());
+
+      boolean distCacheFileIsFound;
+
+      for (TaskInfo taskInfo : taskInfos) {
+        distCacheFileIsFound = false;
+        String[] taskTrackers = taskInfo.getTaskTrackers();
+        for (String taskTracker : taskTrackers) {
+          //Formatting tasktracker to get just its FQDN 
+          taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
+          LOG.info("taskTracker is :" + taskTracker);
+
+          //The tasktrackerFound variable is initialized
+          taskTrackerFound = false;
+
+          //This will be entered from the second job onwards
+          if (countLoop > 1) {
+            if (taskTracker != null) {
+              continueLoop = taskTrackerCollection.contains(taskTracker);
+            }
+            if (continueLoop) {
+              taskTrackerFound = true;
+            }
+          }
+          //Collecting the tasktrackers
+          if (taskTracker != null)
+            taskTrackerCollection.add(taskTracker);
+
+          //We have looped through two times looking for tasks
+          //getting submitted on the same tasktrackers. The same
+          //tasktracker was probably not hit for subsequent jobs because
+          //there are many tasktrackers, so the testcase has to stop here.
+          if (countLoop > 1) {
+            continueLoop = false;
+          }
+
+          tClient = cluster.getTTClient(taskTracker);
+
+          //tClient may be null because the task is already dead. Ex: setup
+          if (tClient == null) {
+            continue;
+          }
+
+          String[] localDirs = tClient.getMapredLocalDirs();
+          int distributedFileCount = 0;
+          //Go to every single path
+          for (String localDir : localDirs) {
+            //Public Distributed cache will always be stored under
+            //mapred.local.dir/tasktracker/archive
+            localDir = localDir + Path.SEPARATOR + 
+                   TaskTracker.getPublicDistributedCacheDir();
+            LOG.info("localDir is : " + localDir);
+
+            //Get file status of all the directories 
+            //and files under that path.
+            FileStatus[] fileStatuses = tClient.listStatus(localDir, 
+                true, true);
+            for (FileStatus  fileStatus : fileStatuses) {
+              Path path = fileStatus.getPath();
+              LOG.info("path is :" + path.toString());
+              //Checking if the received path ends with 
+              //the distributed filename
+              distCacheFileIsFound = (path.toString()).
+                  endsWith(distributedFileName);
+              //If the file is found, check its permissions and
+              //count the occurrence.
+              if (distCacheFileIsFound){
+                LOG.info("PATH found is :" + path.toString());
+                distributedFileCount++;
+                String filename = path.getName();
+                FsPermission fsPerm = fileStatus.getPermission();
+                Assert.assertTrue("File Permission is not 777",
+                    fsPerm.equals(new FsPermission("777")));
+              }
+            }
+          }
+
+          LOG.debug("The distributed FileCount is :" + distributedFileCount);
+          LOG.debug("The taskTrackerFound is :" + taskTrackerFound);
+
+          // If the distributed cache file is modified in dfs
+          // between two job runs, it can be present more than once
+          // on any of the tasktrackers on which the job ran.
+          if (distributedFileCount != 2 && taskTrackerFound) {
+            Assert.fail("The distributed cache file should be found twice, " +
+                "but was found " + distributedFileCount + " times");
+          } else if (distributedFileCount > 1 && !taskTrackerFound) {
+            Assert.fail("The distributed cache file should not be found " +
+                "more than once, but was found " + distributedFileCount + " times");
+          } else if (distributedFileCount < 1)
+            Assert.fail("The distributed cache file should be found at least " +
+                "once, but was found " + distributedFileCount + " times");
+          if (!distCacheFileIsFound) {
+            Assert.assertEquals("The distributed cache file does not exist",
+                distCacheFileIsFound, false);
+          }
+        }
+      }
+      //Allow the job to continue through MR control job.
+      for (TaskInfo taskInfoRemaining : taskInfos) {
+        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
+           .downgrade(taskInfoRemaining.getTaskID()));
+        Collection<TTClient> tts = cluster.getTTClients();
+        for (TTClient cli : tts) {
+          cli.getProxy().sendAction(action);
+        }
+      }
+
+      //Killing the job because all the verification needed
+      //for this testcase is completed.
+      rJob.killJob();
+
+      //Waiting for 3 seconds for cleanup to start
+      Thread.sleep(3000);
+
+      //Getting the last cleanup task's tasktracker also, as
+      //distributed cache gets uploaded even during cleanup.
+      TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(rJob.getID());
+      if (myTaskInfos != null) {
+        for(TaskInfo info : myTaskInfos) {
+          if(info.isSetupOrCleanup()) {
+            String[] taskTrackers = info.getTaskTrackers();
+            for(String taskTracker : taskTrackers) {
+              //Formatting tasktracker to get just its FQDN
+              taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
+              LOG.info("taskTracker is :" + taskTracker);
+              //Collecting the tasktrackers
+              if (taskTracker != null)
+                taskTrackerCollection.add(taskTracker);
+            }    
+          }
+        }
+      }
+
+      //Making sure that the job is complete.
+      while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
+        Thread.sleep(10000);
+        jInfo = wovenClient.getJobInfo(rJob.getID());
+      }
+
+    } while (continueLoop);
+  }
+}
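
The client-side distributed cache usage exercised by this test reduces to a couple of calls on the job configuration. A minimal sketch follows; it is not part of the patch, and the HDFS path is illustrative and assumes the file already exists.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.filecache.DistributedCache;
    import org.apache.hadoop.mapred.JobConf;

    public class DistCacheUsageSketch {
      // Attaches an HDFS file to a job's distributed cache and enables
      // symlinks so tasks can refer to it by name in their working dir.
      public static JobConf attachCacheFile(Configuration conf) throws Exception {
        DistributedCache.createSymlink(conf);
        DistributedCache.addCacheFile(URI.create("hdfs:///tmp/test.txt"), conf);
        return new JobConf(conf);
      }
    }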

+ 281 - 0
src/test/system/java/org/apache/hadoop/mapred/TestDistributedCachePrivateFile.java

@@ -0,0 +1,281 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.DataOutputStream;
+import java.net.URI;
+import java.util.Collection;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.mapreduce.test.system.JTProtocol;
+import org.apache.hadoop.mapreduce.test.system.TTClient;
+import org.apache.hadoop.mapreduce.test.system.JobInfo;
+import org.apache.hadoop.mapreduce.test.system.TaskInfo;
+import org.apache.hadoop.mapreduce.test.system.MRCluster;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.UtilsForTests;
+
+import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
+import org.apache.hadoop.filecache.DistributedCache;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.examples.SleepJob;
+
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import org.junit.Test;
+
+/**
+ * Verify the Distributed Cache functionality.
+ * This test scenario covers the behaviour of a private distributed
+ * cache file. Once a job uses a distributed cache file with private
+ * permissions, that file is stored in mapred.local.dir under a
+ * directory named after the job submitter's username. The directory
+ * should have 700 permissions and the file under it should have 777
+ * permissions.
+*/
+
+public class TestDistributedCachePrivateFile {
+
+  private static MRCluster cluster = null;
+  private static FileSystem dfs = null;
+  private static JobClient client = null;
+  private static FsPermission permission = new FsPermission((short)00770);
+
+  private static String uriPath = "hdfs:///tmp/test.txt";
+  private static final Path URIPATH = new Path(uriPath);
+  private String distributedFileName = "test.txt";
+
+  static final Log LOG = LogFactory.
+                           getLog(TestDistributedCachePrivateFile.class);
+
+  public TestDistributedCachePrivateFile() throws Exception {
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    cluster = MRCluster.createCluster(new Configuration());
+    cluster.setUp();
+    client = cluster.getJTClient().getClient();
+    dfs = client.getFs();
+    //Deleting the file if it already exists
+    dfs.delete(URIPATH, true);
+
+    Collection<TTClient> tts = cluster.getTTClients();
+    //Stopping all TTs
+    for (TTClient tt : tts) {
+      tt.kill();
+    }
+    //Starting all TTs
+    for (TTClient tt : tts) {
+      tt.start();
+    }
+
+    String input = "This will be the content of\n" + "distributed cache\n";
+    //Creating the path with the file
+    DataOutputStream file = 
+        UtilsForTests.createTmpFileDFS(dfs, URIPATH, permission, input);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    cluster.tearDown();
+    dfs.delete(URIPATH, true);
+    
+    Collection<TTClient> tts = cluster.getTTClients();
+    //Stopping all TTs
+    for (TTClient tt : tts) {
+      tt.kill();
+    }
+    //Starting all TTs
+    for (TTClient tt : tts) {
+      tt.start();
+    }
+  }
+
+  @Test
+  /**
+   * Tests the Distributed Cache with a private file.
+   */
+  public void testDistributedCache() throws Exception {
+    Configuration conf = new Configuration(cluster.getConf());
+    JTProtocol wovenClient = cluster.getJTClient().getProxy();
+
+    //Guard counter that keeps the wait loop below from
+    //running forever.
+    int count = 0;
+
+    SleepJob job = new SleepJob();
+    job.setConf(conf);
+    conf = job.setupJobConf(5, 1, 1000, 1000, 100, 100);
+
+    DistributedCache.createSymlink(conf);
+    URI uri = URI.create(uriPath);
+    DistributedCache.addCacheFile(uri, conf);
+    JobConf jconf = new JobConf(conf);
+
+    //Controls the job till all verification is done 
+    FinishTaskControlAction.configureControlActionForJob(conf);
+
+    //Submitting the job
+    RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
+
+    JobStatus[] jobStatus = client.getAllJobs();
+    String userName = jobStatus[0].getUsername();
+
+    TTClient tClient = null;
+    JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
+    LOG.info("jInfo is :" + jInfo);
+
+    //Assert if jobInfo is null
+    Assert.assertNotNull("jobInfo is null", jInfo);
+
+    //Wait for the job to start running.
+    count = 0;
+    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
+      UtilsForTests.waitFor(10000);
+      count++;
+      jInfo = wovenClient.getJobInfo(rJob.getID());
+      //If the count goes beyond a point, then Assert; This is to avoid
+      //infinite loop under unforeseen circumstances.
+      if (count > 10) {
+        Assert.fail("job has not reached running state for more than" +
+            "100 seconds. Failing at this point");
+      }
+    }
+
+    LOG.info("job id is :" + rJob.getID().toString());
+
+    TaskInfo[] taskInfos = cluster.getJTClient().getProxy()
+           .getTaskInfo(rJob.getID());
+
+    boolean distCacheFileIsFound;
+
+    for (TaskInfo taskInfo : taskInfos) {
+      distCacheFileIsFound = false;
+      String[] taskTrackers = taskInfo.getTaskTrackers();
+
+      for(String taskTracker : taskTrackers) {
+        //Getting the exact FQDN of the tasktracker from
+        //the tasktracker string.
+        taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
+        tClient =  cluster.getTTClient(taskTracker);
+        String[] localDirs = tClient.getMapredLocalDirs();
+        int distributedFileCount = 0;
+        String localDirOnly = null;
+
+        boolean FileNotPresentForThisDirectoryPath = false;
+
+        //Go to every single path
+        for (String localDir : localDirs) {
+          FileNotPresentForThisDirectoryPath = false;
+          localDirOnly = localDir;
+
+          //The per-user directory is
+          //mapred.local.dir/taskTracker/<username>
+          localDirOnly = localDir + Path.SEPARATOR + TaskTracker.SUBDIR + 
+              Path.SEPARATOR +  userName;
+
+          //Private Distributed cache will always be stored under
+          //mapred.local.dir/taskTracker/<username>/distcache
+          //Checking the username directory to verify that it has the
+          //proper permissions
+          localDir = localDir + Path.SEPARATOR +
+                  TaskTracker.getPrivateDistributedCacheDir(userName);
+
+          FileStatus fileStatusMapredLocalDirUserName = null;
+
+          try {
+            fileStatusMapredLocalDirUserName = tClient.
+                            getFileStatus(localDirOnly, true);
+          } catch (Exception e) {
+            LOG.info("LocalDirOnly :" + localDirOnly + " not found");
+            FileNotPresentForThisDirectoryPath = true;
+          }
+
+          //The file will only be stored under one of the mapred.local.dir
+          //entries. If other paths were hit, just continue.
+          if (FileNotPresentForThisDirectoryPath)
+            continue;
+
+          Path pathMapredLocalDirUserName = 
+              fileStatusMapredLocalDirUserName.getPath();
+          FsPermission fsPermMapredLocalDirUserName =
+              fileStatusMapredLocalDirUserName.getPermission();
+          Assert.assertTrue("Directory Permission is not 700",
+            fsPermMapredLocalDirUserName.equals(new FsPermission("700")));
+
+          //Get file status of all the directories 
+          //and files under that path.
+          FileStatus[] fileStatuses = tClient.listStatus(localDir, 
+              true, true);
+          for (FileStatus  fileStatus : fileStatuses) {
+            Path path = fileStatus.getPath();
+            LOG.info("path is :" + path.toString());
+            //Checking if the received path ends with 
+            //the distributed filename
+            distCacheFileIsFound = (path.toString()).
+                endsWith(distributedFileName);
+            //If the file is found, check its permissions and
+            //count the occurrence.
+            if (distCacheFileIsFound){
+              LOG.info("PATH found is :" + path.toString());
+              distributedFileCount++;
+              String filename = path.getName();
+              FsPermission fsPerm = fileStatus.getPermission();
+              Assert.assertTrue("File Permission is not 777",
+                fsPerm.equals(new FsPermission("777")));
+            }
+          }
+        }
+
+        LOG.info("Distributed File count is :" + distributedFileCount);
+
+        if (distributedFileCount > 1) {
+          Assert.fail("The distributed cache file was found more than once");
+        } else if (distributedFileCount < 1)
+          Assert.fail("The distributed cache file was not found");
+        if (!distCacheFileIsFound) {
+          Assert.assertEquals("The distributed cache file does not exist", 
+              distCacheFileIsFound, false);
+        }
+      }
+
+      //Allow the job to continue through MR control job.
+      for (TaskInfo taskInfoRemaining : taskInfos) {
+        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
+           .downgrade(taskInfoRemaining.getTaskID()));
+        Collection<TTClient> tts = cluster.getTTClients();
+        for (TTClient cli : tts) {
+          cli.getProxy().sendAction(action);
+        }
+      }
+
+      //Killing the job because all the verification needed
+      //for this testcase is completed.
+      rJob.killJob();
+    }
+  }
+}
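
For reference, the two locations the private-file test inspects under every mapred.local.dir entry can be summed up as below. This is a sketch only, reusing the TaskTracker helpers the test itself calls; the class name is hypothetical and the code is not part of the patch.

    package org.apache.hadoop.mapred;

    import org.apache.hadoop.fs.Path;

    // Sketch of the paths checked above: the per-user directory is expected
    // to have 700 permissions, and files under the private distributed
    // cache directory 777 permissions.
    class PrivateCachePathSketch {
      static String userDir(String localDir, String userName) {
        return localDir + Path.SEPARATOR + TaskTracker.SUBDIR
            + Path.SEPARATOR + userName;
      }

      static String privateCacheDir(String localDir, String userName) {
        return localDir + Path.SEPARATOR
            + TaskTracker.getPrivateDistributedCacheDir(userName);
      }
    }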

+ 300 - 0
src/test/system/java/org/apache/hadoop/mapred/TestDistributedCacheUnModifiedFile.java

@@ -0,0 +1,300 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import java.io.DataOutputStream;
+import java.net.URI;
+import java.util.Collection;
+import java.util.ArrayList;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.Log;
+import org.apache.hadoop.mapreduce.test.system.JTProtocol;
+import org.apache.hadoop.mapreduce.test.system.TTClient;
+import org.apache.hadoop.mapreduce.test.system.JobInfo;
+import org.apache.hadoop.mapreduce.test.system.TaskInfo;
+import org.apache.hadoop.mapreduce.test.system.MRCluster;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapred.UtilsForTests;
+
+import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
+import org.apache.hadoop.filecache.DistributedCache;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.examples.SleepJob;
+
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import org.junit.Test;
+
+/**
+ * Verify the Distributed Cache functionality.
+ * This test scenario covers the behaviour of a distributed cache file
+ * that is not modified between the runs of (at most) two jobs that
+ * access it. Once a job uses a distributed cache file, that file is
+ * stored in mapred.local.dir. If the next job uses the same file, it is
+ * not stored again. So, if two jobs choose the same tasktracker for
+ * their task execution, the distributed cache file should not be found
+ * twice.
+ *
+ * This testcase runs a job with a distributed cache file. The handle of
+ * every tasktracker that ran one of the job's tasks is obtained and
+ * checked for the presence of the distributed cache file, with proper
+ * permissions, in the proper directory. When the job runs again and any
+ * of its tasks hits a tasktracker that ran a task of the previous job,
+ * the file should not be uploaded again and the task should use the old
+ * file. This is verified.
+*/
+
+public class TestDistributedCacheUnModifiedFile {
+
+  private static MRCluster cluster = null;
+  private static FileSystem dfs = null;
+  private static FileSystem ttFs = null;
+  private static JobClient client = null;
+  private static FsPermission permission = new FsPermission((short)00777);
+
+  private static String uriPath = "hdfs:///tmp/test.txt";
+  private static final Path URIPATH = new Path(uriPath);
+  private String distributedFileName = "test.txt";
+
+  static final Log LOG = LogFactory.
+                           getLog(TestDistributedCacheUnModifiedFile.class);
+
+  public TestDistributedCacheUnModifiedFile() throws Exception {
+  }
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    cluster = MRCluster.createCluster(new Configuration());
+    cluster.setUp();
+    client = cluster.getJTClient().getClient();
+    dfs = client.getFs();
+    //Deleting the file if it already exists
+    dfs.delete(URIPATH, true);
+
+    Collection<TTClient> tts = cluster.getTTClients();
+    //Stopping all TTs
+    for (TTClient tt : tts) {
+      tt.kill();
+    }
+    //Starting all TTs
+    for (TTClient tt : tts) {
+      tt.start();
+    }
+   
+    //Waiting for 5 seconds to make sure tasktrackers are ready
+    Thread.sleep(5000);
+
+    String input = "This will be the content of\n" + "distributed cache\n";
+    //Creating the path with the file
+    DataOutputStream file = 
+        UtilsForTests.createTmpFileDFS(dfs,URIPATH,permission,input);
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    cluster.tearDown();
+    dfs.delete(URIPATH, true);
+    
+    Collection<TTClient> tts = cluster.getTTClients();
+    //Stopping all TTs
+    for (TTClient tt : tts) {
+      tt.kill();
+    }
+    //Starting all TTs
+    for (TTClient tt : tts) {
+      tt.start();
+    }
+  }
+
+  @Test
+  /**
+   * Tests the Distributed Cache with an unmodified file.
+   */
+  public void testDistributedCache() throws Exception {
+    Configuration conf = new Configuration(cluster.getConf());
+    JTProtocol wovenClient = cluster.getJTClient().getProxy();
+
+    //Guard counter that keeps the wait loop below from
+    //running forever.
+    int count = 0;
+    //This boolean will decide whether to run job again
+    boolean continueLoop = true;
+    //counter for job Loop
+    int countLoop = 0;
+    //This counter increases with all the tasktrackers in which tasks ran
+    int taskTrackerCounter = 0;
+    //This will store all the tasktrackers in which tasks ran
+    ArrayList<String> taskTrackerCollection = new ArrayList<String>();
+
+    do {
+      SleepJob job = new SleepJob();
+      job.setConf(conf);
+      conf = job.setupJobConf(5, 1, 1000, 1000, 100, 100);
+
+      DistributedCache.createSymlink(conf);
+      URI uri = URI.create(uriPath);
+      DistributedCache.addCacheFile(uri, conf);
+      JobConf jconf = new JobConf(conf);
+ 
+      //Controls the job till all verification is done 
+      FinishTaskControlAction.configureControlActionForJob(conf);
+
+      //Submitting the job
+      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
+
+      //counter for job Loop
+      countLoop++;
+
+      TTClient tClient = null;
+      JobInfo jInfo = wovenClient.getJobInfo(rJob.getID());
+      LOG.info("jInfo is :" + jInfo);
+
+      //Assert if jobInfo is null
+      Assert.assertNotNull("jobInfo is null", jInfo);
+
+      //Wait for the job to start running.
+      count = 0;
+      while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
+        UtilsForTests.waitFor(10000);
+        count++;
+        jInfo = wovenClient.getJobInfo(rJob.getID());
+        //If the count goes beyond a point, fail the test; this is to
+        //avoid an infinite loop under unforeseen circumstances.
+        if (count > 10) {
+          Assert.fail("job has not reached running state for more than" + 
+            "100 seconds. Failing at this point");
+        }
+      }
+
+      LOG.info("job id is :" + rJob.getID().toString());
+
+      TaskInfo[] taskInfos = cluster.getJTClient().getProxy()
+             .getTaskInfo(rJob.getID());
+
+      boolean distCacheFileIsFound;
+       
+      for (TaskInfo taskInfo : taskInfos) {
+        distCacheFileIsFound = false;
+        String[] taskTrackers = taskInfo.getTaskTrackers();
+        for (String taskTracker : taskTrackers) {
+          //Formatting tasktracker to get just its FQDN 
+          taskTracker = UtilsForTests.getFQDNofTT(taskTracker);
+          LOG.info("taskTracker is :" + taskTracker);
+
+          //This will be entered from the second job onwards
+          if (countLoop > 1) {
+            if (taskTracker != null) {
+              continueLoop = taskTrackerCollection.contains(taskTracker);
+            }
+            if (!continueLoop) {
+              break;
+            }
+          }
+
+          //Collecting the tasktrackers
+          if (taskTracker != null)  
+            taskTrackerCollection.add(taskTracker);
+
+          //We have looped through enough times looking for tasks
+          //getting submitted on the same tasktrackers. The same
+          //tasktracker was probably not hit for subsequent jobs because
+          //there are many tasktrackers, so the testcase has to stop here.
+          if (countLoop > 2) {
+            continueLoop = false;
+          }
+
+          tClient = cluster.getTTClient(taskTracker);
+
+          //tClient may be null because the task is already dead. Ex: setup
+          if (tClient == null) {
+            continue;
+          }
+
+          String[] localDirs = tClient.getMapredLocalDirs();
+          int distributedFileCount = 0;
+          //Go to every single path
+          for (String localDir : localDirs) {
+            //Public Distributed cache will always be stored under
+            //mapred.local.dir/tasktracker/archive
+            localDir = localDir + Path.SEPARATOR + 
+                   TaskTracker.getPublicDistributedCacheDir();
+            LOG.info("localDir is : " + localDir);
+
+            //Get file status of all the directories 
+            //and files under that path.
+            FileStatus[] fileStatuses = tClient.listStatus(localDir, 
+                true, true);
+            for (FileStatus  fileStatus : fileStatuses) {
+              Path path = fileStatus.getPath();
+              LOG.info("path is :" + path.toString());
+              //Checking if the received path ends with 
+              //the distributed filename
+              distCacheFileIsFound = (path.toString()).
+                  endsWith(distributedFileName);
+              //If the file is found, check its permissions and
+              //count the occurrence.
+              if (distCacheFileIsFound){
+                LOG.info("PATH found is :" + path.toString());
+                distributedFileCount++;
+                String filename = path.getName();
+                FsPermission fsPerm = fileStatus.getPermission();
+                Assert.assertTrue("File Permission is not 777", 
+                    fsPerm.equals(new FsPermission("777")));
+              }
+            }
+          }
+
+          // Since the distributed cache file is unmodified in dfs
+          // between two job runs, it should not be present more than once
+          // on any of the tasktrackers on which the job ran.
+          if (distributedFileCount > 1) {
+            Assert.fail("The distributed cache file was found more than once");
+          } else if (distributedFileCount < 1)
+            Assert.fail("The distributed cache file was not found");
+          if (!distCacheFileIsFound) {
+            Assert.assertEquals("The distributed cache file does not exist",
+                distCacheFileIsFound, false);
+          }
+        }
+      }
+      //Allow the job to continue through MR control job.
+      for (TaskInfo taskInfoRemaining : taskInfos) {
+        FinishTaskControlAction action = new FinishTaskControlAction(TaskID
+           .downgrade(taskInfoRemaining.getTaskID()));
+        Collection<TTClient> tts = cluster.getTTClients();
+        for (TTClient cli : tts) {
+          cli.getProxy().sendAction(action);
+        }
+      }
+
+      //Killing the job because all the verification needed
+      //for this testcase is completed.
+      rJob.killJob();
+    } while (continueLoop);
+  }
+}

+ 216 - 0
src/test/system/java/org/apache/hadoop/mapred/TestFileOwner.java

@@ -0,0 +1,216 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.examples.SleepJob;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
+import org.apache.hadoop.mapreduce.test.system.JTProtocol;
+import org.apache.hadoop.mapreduce.test.system.JobInfo;
+import org.apache.hadoop.mapreduce.test.system.MRCluster;
+import org.apache.hadoop.mapreduce.test.system.TTClient;
+import org.apache.hadoop.mapreduce.test.system.TTInfo;
+import org.apache.hadoop.mapreduce.test.system.TTTaskInfo;
+import org.apache.hadoop.mapreduce.test.system.TaskInfo;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestFileOwner {
+  public static MRCluster cluster;
+
+  private StringBuffer jobIdDir = new StringBuffer();
+  private JTProtocol wovenClient = null;
+  private static final Log LOG = LogFactory.getLog(TestFileOwner.class);
+  private String taskController = null;
+  private final FsPermission PERM_777 = new FsPermission("777");
+  private final FsPermission PERM_755 = new FsPermission("755");
+  private final FsPermission PERM_644 = new FsPermission("644");
+
+  @BeforeClass
+  public static void setUp() throws java.lang.Exception {
+    cluster = MRCluster.createCluster(new Configuration());
+    cluster.setUp();
+  }
+
+  /*
+   * The test checks the file permissions of local files
+   * in mapred.local.dir. Job control is used to make the
+   * tasks wait until they are signalled to finish.
+   *
+   * @throws Exception in case of test errors
+   */
+  @Test
+  public void testFilePermission() throws Exception {
+    wovenClient = cluster.getJTClient().getProxy();
+    Configuration conf = new Configuration(cluster.getConf());
+    FinishTaskControlAction.configureControlActionForJob(conf);
+    SleepJob job = new SleepJob();
+    job.setConf(conf);
+    conf = job.setupJobConf(1, 0, 100, 100, 100, 100);
+    JobConf jconf = new JobConf(conf);
+    RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
+    taskController = conf.get("mapred.task.tracker.task-controller");
+    // get the job info so we can get the env variables from the daemon.
+    // Now wait for the task to be in the running state, only then the
+    // directories will be created
+    JobInfo info = wovenClient.getJobInfo(rJob.getID());
+    Assert.assertNotNull("JobInfo is null",info);
+    JobID id = rJob.getID();
+    while (info.runningMaps() != 1) {
+      Thread.sleep(1000);
+      info = wovenClient.getJobInfo(id);
+    }
+    TaskInfo[] myTaskInfos = wovenClient.getTaskInfo(id);
+    for (TaskInfo tInfo : myTaskInfos) {
+      if (!tInfo.isSetupOrCleanup()) {
+        String[] taskTrackers = tInfo.getTaskTrackers();
+        for (String taskTracker : taskTrackers) {
+          TTInfo ttInfo = wovenClient.getTTInfo(taskTracker);
+          TTClient ttCli = cluster.getTTClient(ttInfo.getStatus().getHost());
+          Assert.assertNotNull("TTClient instance is null",ttCli);
+          TTTaskInfo ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
+          Assert.assertNotNull("TTTaskInfo is null",ttTaskInfo);
+          while (ttTaskInfo.getTaskStatus().getRunState() !=
+                 TaskStatus.State.RUNNING) {
+            Thread.sleep(100);
+            ttTaskInfo = ttCli.getProxy().getTask(tInfo.getTaskID());
+          }
+          testPermissionWithTaskController(ttCli, conf, info);
+          FinishTaskControlAction action = new FinishTaskControlAction(TaskID
+              .downgrade(tInfo.getTaskID()));
+          for (TTClient cli : cluster.getTTClients()) {
+            cli.getProxy().sendAction(action);
+          }
+        }
+      }
+    }
+    JobInfo jInfo = wovenClient.getJobInfo(id);
+    jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
+    while (!jInfo.getStatus().isJobComplete()) {
+      Thread.sleep(100);
+      jInfo = cluster.getJTClient().getProxy().getJobInfo(id);
+    }
+  }
+
+  private void testPermissionWithTaskController(TTClient tClient,
+      Configuration conf,
+      JobInfo info) {
+    Assert.assertNotNull("TTclient is null",tClient);
+    FsPermission fsPerm = null;
+    String[] pathInfo = conf.getStrings("mapred.local.dir");
+    for (int i = 0; i < pathInfo.length; i++) {
+      // First verify the jobid directory exists
+      jobIdDir = new StringBuffer();
+      String userName = null;
+      try {
+        JobStatus[] jobStatus = cluster.getJTClient().getClient().getAllJobs();
+        userName = jobStatus[0].getUsername();
+      } catch(Exception ex) {
+        LOG.error("Failed to get user name");
+        boolean status = false;
+        Assert.assertTrue("Failed to get the userName", status);
+      }
+      jobIdDir.append(pathInfo[i]).append(Path.SEPARATOR);
+      jobIdDir.append(TaskTracker.getLocalJobDir(userName,
+                                   info.getID().toString()));
+      FileStatus[] fs = null;
+      try {
+        fs = tClient.listStatus(jobIdDir.toString(), true);
+      } catch (Exception ex) {
+        LOG.error("Failed to get the jobIdDir files " + ex);
+      }
+      Assert.assertEquals("Filestatus length is zero",fs.length != 0, true);
+      for (FileStatus file : fs) {
+        try {
+          String filename = file.getPath().getName();
+          if (filename.equals(TaskTracker.JOBFILE)) {
+            if (DefaultTaskController.class.getName().equals(taskController)) {
+              fsPerm = file.getPermission();
+              Assert.assertTrue("FilePermission failed for "+filename,
+                  fsPerm.equals(PERM_777));
+            }
+          }
+          if (filename.startsWith("attempt")) {
+            StringBuffer attemptDir = new StringBuffer(jobIdDir);
+            attemptDir.append(Path.SEPARATOR).append(filename);
+            if (tClient.getFileStatus(attemptDir.toString(), true) != null) {
+              FileStatus[] attemptFs = tClient.listStatus(
+                  attemptDir.toString(), true, true);
+              for (FileStatus attemptfz : attemptFs) {
+                Assert.assertNotNull("FileStatus is null",attemptfz);
+                fsPerm = attemptfz.getPermission();
+                Assert.assertNotNull("FsPermission is null",fsPerm);
+                if (DefaultTaskController.class.getName().equals(taskController)) {
+                  if (!attemptfz.isDir()) {
+                    Assert.assertTrue("FilePermission failed for "+filename,
+                        fsPerm.equals(PERM_777));
+                  } else {
+                    Assert.assertTrue("FilePermission failed for "+filename,
+                        fsPerm.equals(PERM_755));
+                  }
+                }
+              }
+            }
+          }
+          if (filename.equals(TaskTracker.TASKJARDIR)) {
+            StringBuffer jarsDir = new StringBuffer(jobIdDir);
+            jarsDir.append(Path.SEPARATOR).append(filename);
+            FileStatus[] jarsFs = tClient.listStatus(jarsDir.toString(), true,
+                true);
+            for (FileStatus jarsfz : jarsFs) {
+              Assert.assertNotNull("FileStatus is null",jarsfz);
+              fsPerm = jarsfz.getPermission();
+              Assert.assertNotNull("File permission is null",fsPerm);
+              if (DefaultTaskController.class.getName().equals(taskController)) {
+                if (!jarsfz.isDir()) {
+                  if (jarsfz.getPath().getName().equals("job.jar")) {
+                    Assert.assertTrue("FilePermission failed for "+filename,
+                        fsPerm.equals(PERM_777));
+                  } else {
+                    Assert.assertTrue("FilePermission failed for "+filename,
+                        fsPerm.equals(PERM_644));
+                  }
+                } else {
+                  Assert.assertTrue("FilePermission failed for "+filename,
+                      fsPerm.equals(PERM_755));
+                }
+              }
+            }
+          }
+        } catch (Exception ex) {
+          LOG.error("The exception occurred while searching for nonexsistent"
+              + "file, ignoring and continuing. " + ex);
+        }
+      }// for loop ends
+    }// for loop ends
+  }
+
+  @AfterClass
+  public static void tearDown() throws java.lang.Exception {
+    cluster.tearDown();
+  }
+}
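
The permission assertions above all perform the same comparison. A small sketch of that check is shown below; it reuses the FsPermission(String) constructor already used elsewhere in this patch, and the class name is illustrative.

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.permission.FsPermission;

    // Sketch: compare a file's actual permissions against an expected
    // octal mode string such as "777", "755" or "644".
    class PermissionCheckSketch {
      static boolean hasMode(FileStatus status, String octalMode) {
        return status.getPermission().equals(new FsPermission(octalMode));
      }
    }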

+ 185 - 0
src/test/system/java/org/apache/hadoop/mapred/TestJobKill.java

@@ -0,0 +1,185 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Reducer;
+import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.test.system.JTProtocol;
+import org.apache.hadoop.mapreduce.test.system.JobInfo;
+import org.apache.hadoop.mapreduce.test.system.MRCluster;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import testjar.JobKillCommitter;
+
+public class TestJobKill {
+  private static final Log LOG = LogFactory.getLog(TestJobKill.class);
+  private JTProtocol wovenClient = null;
+  private static Path outDir = new Path("output");
+  private static Path inDir = new Path("input");
+  private static FileSystem fs = null;
+  private static MRCluster cluster;
+
+  @BeforeClass
+  public static void setUp() throws Exception {
+    cluster = MRCluster.createCluster(new Configuration());
+    cluster.setUp();
+    fs = inDir.getFileSystem(cluster.getJTClient().getConf());
+    if(!fs.exists(inDir)){
+      fs.create(inDir);
+    }
+    if (fs.exists(outDir)) {
+      fs.delete(outDir,true);
+    }
+  }
+
+  @AfterClass
+  public static void tearDown() throws Exception {
+    if(fs.exists(inDir)) {
+      fs.delete(inDir,true);
+    }    
+    if (fs.exists(outDir)) {
+      fs.delete(outDir,true);
+    }
+    cluster.tearDown();
+  }
+
+  /*
+   * The intention of this test case is to test job failure due to system
+   * exceptions, so the exceptions are thrown intentionally and the job is
+   * verified for failure. At the end of the test, a verification is made
+   * that the success file is not present in the hdfs location, because
+   * the success file should only exist if the job actually succeeded.
+   * 
+   * @throws Exception in a case of test errors
+   */
+  @Test
+  public void testSystemJobKill() throws Exception {
+    wovenClient = cluster.getJTClient().getProxy();
+    Configuration conf = new Configuration(cluster.getConf());
+    conf.set("mapred.map.max.attempts", "1");
+    conf.set("mapred.reduce.max.attempts", "1");
+    // fail the mapper job
+    failJob(conf, JobKillCommitter.CommitterWithNoError.class, "JobMapperFail",
+        JobKillCommitter.MapperFail.class, JobKillCommitter.ReducerPass.class,
+        false);
+    // fail the reducer job
+    failJob(conf, JobKillCommitter.CommitterWithNoError.class,
+        "JobReducerFail", JobKillCommitter.MapperPass.class,
+        JobKillCommitter.ReducerFail.class,false);
+    // fail the set up job
+    failJob(conf, JobKillCommitter.CommitterWithFailSetup.class,
+        "JobSetupFail", JobKillCommitter.MapperPass.class,
+        JobKillCommitter.ReducerPass.class,false);
+    // fail the clean up job
+    failJob(conf, JobKillCommitter.CommitterWithFailCleanup.class,
+        "JobCleanupFail", JobKillCommitter.MapperPass.class,
+        JobKillCommitter.ReducerPass.class,false);
+  }
+
+  private void failJob(Configuration conf,
+      Class<? extends OutputCommitter> theClass, String confName,
+      Class<? extends Mapper> mapClass, Class<? extends Reducer> redClass,
+      boolean isUserKill)
+      throws Exception {
+    Job job = new Job(conf, confName);
+    job.setJarByClass(JobKillCommitter.class);
+    job.setMapperClass(mapClass);
+    job.setCombinerClass(redClass);
+    job.setMapOutputKeyClass(Text.class);
+    job.setMapOutputValueClass(Text.class);
+    job.setReducerClass(redClass);
+    job.setNumReduceTasks(1);
+    FileInputFormat.addInputPath(job, inDir);
+    FileOutputFormat.setOutputPath(job, outDir);
+    JobConf jconf = new JobConf(job.getConfiguration(), JobKillCommitter.class);
+    jconf.setOutputCommitter(theClass);
+    if(!isUserKill)
+    {  
+      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
+      JobID id = rJob.getID();
+      JobInfo jInfo = wovenClient.getJobInfo(id);
+      Assert.assertTrue("Job is not in PREP state",
+          jInfo.getStatus().getRunState() == JobStatus.PREP);
+    }
+    else
+    {
+      //user kill job
+      RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
+      JobInfo info = wovenClient.getJobInfo(rJob.getID());
+      Assert.assertNotNull("Job Info is null",info);
+      JobID id = rJob.getID();
+      while (info.runningMaps() != 1) {
+        Thread.sleep(1000);
+        info = wovenClient.getJobInfo(id);
+      }
+      rJob.killJob();
+    }
+    checkCleanup(jconf);
+    deleteOutputDir();
+  }
+  
+  /**
+   * This test kills the job by explicitly calling the kill API
+   * and makes sure the cleanup happens.
+   * @throws Exception
+   */
+  @Test
+  public void testUserJobKill() throws Exception{
+    wovenClient = cluster.getJTClient().getProxy();
+    Configuration conf = new Configuration(cluster.getConf());
+    conf.set("mapred.map.max.attempts", "1");
+    conf.set("mapred.reduce.max.attempts", "1");
+    // fail the mapper job
+    failJob(conf, JobKillCommitter.CommitterWithNoError.class, "JobUserKill",
+        JobKillCommitter.MapperPassSleep.class, 
+        JobKillCommitter.ReducerPass.class,true);    
+  }
+
+  private void checkCleanup(JobConf conf) throws Exception {
+    if (outDir != null) {
+      if (fs.exists(outDir)) {
+        Path filePath = new Path(outDir,
+            FileOutputCommitter.SUCCEEDED_FILE_NAME);
+        // check to make sure the success file is not there since the job
+        // failed.
+        Assert.assertTrue("The success file is present when the job failed",
+            !fs.exists(filePath));
+      }
+    }
+  }
+
+  private void deleteOutputDir() throws Exception {
+    if (fs != null) {
+      fs.delete(outDir, true);
+    }
+  }
+}
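
The JobKillCommitter helpers referenced above live in the separate testjar package and are not shown in this hunk. Conceptually, a committer that fails job setup, which is what the setup-failure case relies on, could be as small as the following sketch; the class name is hypothetical and this is not the actual testjar code.

    import java.io.IOException;
    import org.apache.hadoop.mapred.FileOutputCommitter;
    import org.apache.hadoop.mapred.JobContext;

    // Hypothetical committer that throws during setup, forcing the
    // framework to fail the job before any task runs.
    public class FailSetupCommitterSketch extends FileOutputCommitter {
      @Override
      public void setupJob(JobContext context) throws IOException {
        throw new IOException("Intentional setup failure for the test");
      }
    }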

+ 145 - 0
src/test/system/java/org/apache/hadoop/mapred/TestPushConfig.java

@@ -0,0 +1,145 @@
+package org.apache.hadoop.mapred;
+import java.io.File;
+import java.io.FileOutputStream;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.test.system.MRCluster;
+import org.apache.hadoop.test.system.AbstractDaemonClient;
+import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestPushConfig {
+  private static MRCluster cluster;
+  private String localConfDir = "localconf";
+  private static final Log LOG = LogFactory.getLog(
+      TestPushConfig.class.getName());
+  
+  @BeforeClass
+  public static void before() throws Exception {
+    String [] expExcludeList = new String[2];
+    expExcludeList[0] = "java.net.ConnectException";
+    expExcludeList[1] = "java.io.IOException";
+    
+    cluster = MRCluster.createCluster(new Configuration());
+    cluster.setExcludeExpList(expExcludeList);
+    cluster.setUp();
+  }
+
+  @AfterClass
+  public static void after() throws Exception {
+    cluster.tearDown();
+  }
+  
+  /**
+   * This test exercises the pushConfig feature. The pushConfig functionality
+   * is available as part of the cluster process manager. It takes a local
+   * input directory and pushes all of its files to the remote conf
+   * directory. This functionality is required to change the config on the
+   * fly and restart the cluster, which is used by other test cases.
+   * @throws Exception 
+   */
+  @Test
+  public void testPushConfig() throws Exception {
+    final String DUMMY_CONFIG_STRING = "mapred.newdummy.conf";
+    final String DUMMY_CONFIG_STRING_VALUE = "HerriotTestRules";
+    Configuration origconf = new Configuration(cluster.getConf());
+    origconf.set(DUMMY_CONFIG_STRING, DUMMY_CONFIG_STRING_VALUE);
+    String localDir = HadoopDaemonRemoteCluster.getDeployedHadoopConfDir() + 
+        File.separator + localConfDir;
+    File lFile = new File(localDir);
+    if(!lFile.exists()){
+      lFile.mkdir();
+    }
+    String mapredConf = localDir + File.separator + "mapred-site.xml";
+    File file = new File(mapredConf);
+    origconf.writeXml(new FileOutputStream(file));    
+    Configuration daemonConf =  cluster.getJTClient().getProxy().getDaemonConf();
+    Assert.assertTrue("Dummy varialble is expected to be null before restart.",
+        daemonConf.get(DUMMY_CONFIG_STRING) == null);
+    String newDir = cluster.getClusterManager().pushConfig(localDir);
+    cluster.stop();
+    AbstractDaemonClient cli = cluster.getJTClient();
+    waitForClusterStop(cli);
+    // make sure the cluster has actually stopped
+    cluster.getClusterManager().start(newDir);
+    cli = cluster.getJTClient();
+    waitForClusterStart(cli);
+    // make sure the cluster has actually started
+    Configuration newconf = cluster.getJTClient().getProxy().getDaemonConf();
+    Assert.assertTrue("Extra varialble is expected to be set",
+        newconf.get(DUMMY_CONFIG_STRING).equals(DUMMY_CONFIG_STRING_VALUE));
+    cluster.getClusterManager().stop(newDir);
+    cli = cluster.getJTClient();
+    // make sure the cluster has actually stopped
+    waitForClusterStop(cli);
+    // start the daemons with original conf dir
+    cluster.getClusterManager().start();
+    cli = cluster.getJTClient();    
+    waitForClusterStart(cli);  
+    daemonConf =  cluster.getJTClient().getProxy().getDaemonConf();
+    Assert.assertTrue("Dummy variable is expected to be null after restart.",
+        daemonConf.get(DUMMY_CONFIG_STRING) == null);
+    lFile.delete();
+  }
+  
+  private void waitForClusterStop(AbstractDaemonClient cli) throws Exception {
+    int i = 1;
+    while (i < 40) {
+      try {
+        cli.ping();
+        Thread.sleep(1000);
+        i++;
+      } catch (Exception e) {
+        break;
+      }
+    }
+    if (i >= 40) {
+      Assert.fail("JT on " + cli.getHostName() + " Should have been down.");
+    }
+    for (AbstractDaemonClient tcli : cluster.getTTClients()) {
+      i = 1;
+      while (i < 40) {
+        try {
+          tcli.ping();
+          Thread.sleep(1000);
+          i++;
+        } catch (Exception e) {
+          break;
+        }
+      }
+      if (i >= 40) {
+        Assert.fail("TT on " + tcli.getHostName() + " Should have been down.");
+      }
+    }
+  }
+  
+  private void waitForClusterStart(AbstractDaemonClient cli) throws Exception {
+    int i=1;
+    while (i < 40) {
+      try {
+        cli.ping();
+        break;
+      } catch (Exception e) {
+        i++;
+        Thread.sleep(1000);
+        LOG.info("Waiting for Jobtracker on host : "
+            + cli.getHostName() + " to come up.");
+      }
+    }
+    for (AbstractDaemonClient tcli : cluster.getTTClients()) {
+      i = 1;
+      while (i < 40) {
+        try {
+          tcli.ping();
+          break;
+        } catch (Exception e) {
+          i++;
+          Thread.sleep(1000);
+          LOG.info("Waiting for Tasktracker on host : "
+              + tcli.getHostName() + " to come up.");
+        }
+      }
+    }
+  }
+}

+ 625 - 0
src/test/system/java/org/apache/hadoop/mapred/TestTaskKilling.java

@@ -0,0 +1,625 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.AfterClass;
+import org.junit.Test;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.test.system.MRCluster;
+import org.apache.hadoop.mapreduce.test.system.JTProtocol;
+import org.apache.hadoop.mapreduce.test.system.JobInfo;
+import org.apache.hadoop.mapreduce.test.system.TaskInfo;
+import org.apache.hadoop.mapreduce.test.system.TTClient;
+import org.apache.hadoop.mapreduce.test.system.FinishTaskControlAction;
+import org.apache.hadoop.mapred.JobClient.NetworkedJob;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.IntWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.examples.SleepJob;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+
+/**
+ * A system test for verifying job and task status after killing
+ * tasks under different conditions.
+ */
+public class TestTaskKilling {
+  private static final Log LOG = LogFactory.getLog(TestTaskKilling.class);
+  private static MRCluster cluster;
+  private static JobClient jobClient = null;
+  private static JTProtocol remoteJTClient = null;
+
+  public TestTaskKilling() {
+  }
+
+  @BeforeClass
+  public static void before() throws Exception {
+    Configuration conf = new Configuration();
+    cluster = MRCluster.createCluster(conf);
+    cluster.setUp();
+    jobClient = cluster.getJTClient().getClient();
+    remoteJTClient = cluster.getJTClient().getProxy();
+  }
+
+  @AfterClass
+  public static void after() throws Exception {
+    cluster.tearDown();
+  }
+
+  /**
+   * Verifies whether a running job succeeds after some of its task
+   * attempts are killed.
+   */
+  @Test
+  public void testFailedTaskJobStatus() throws IOException, 
+          InterruptedException {
+    Configuration conf = new Configuration(cluster.getConf());
+    TaskInfo taskInfo = null;
+    SleepJob job = new SleepJob();
+    job.setConf(conf);
+    conf = job.setupJobConf(3, 1, 4000, 4000, 100, 100);
+    JobConf jobConf = new JobConf(conf);
+    jobConf.setMaxMapAttempts(20);
+    jobConf.setMaxReduceAttempts(20);
+    RunningJob runJob = jobClient.submitJob(jobConf);
+    JobID id = runJob.getID();
+    JobInfo jInfo = remoteJTClient.getJobInfo(id);
+    int counter = 0;
+    while (counter < 60) {
+      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
+        break;
+      } else {
+        UtilsForTests.waitFor(1000);
+        jInfo = remoteJTClient.getJobInfo(id);
+      }
+      counter ++;
+    }
+    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);
+
+    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
+    for (TaskInfo taskinfo : taskInfos) {
+      if (!taskinfo.isSetupOrCleanup()) {
+        taskInfo = taskinfo;
+      }
+    }
+
+    counter = 0;
+    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
+    while (counter < 60) {
+      if (taskInfo.getTaskStatus().length > 0) {
+        if (taskInfo.getTaskStatus()[0].getRunState() 
+                == TaskStatus.State.RUNNING) {
+          break;
+        }
+      }
+      UtilsForTests.waitFor(1000);
+      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
+      counter++;
+    }
+    Assert.assertTrue("Task has not been started for 1 min.", counter != 60);
+
+    NetworkedJob networkJob = jobClient.new NetworkedJob(jInfo.getStatus());
+    TaskID tID = TaskID.downgrade(taskInfo.getTaskID());
+    TaskAttemptID taskAttID = new TaskAttemptID(tID , 0);
+    networkJob.killTask(taskAttID, false);
+
+    LOG.info("Waiting till the job is completed...");
+    while (!jInfo.getStatus().isJobComplete()) {
+      UtilsForTests.waitFor(100);
+      jInfo = remoteJTClient.getJobInfo(id);
+    }
+
+    Assert.assertEquals("JobStatus", jInfo.getStatus().getRunState(), 
+            JobStatus.SUCCEEDED);
+  }
+
+
+  /**
+   * Verifying whether task temporary output directory is cleaned up or not
+   * after killing the task.
+   */
+  @Test
+  public void testDirCleanupAfterTaskKilled() throws IOException, 
+          InterruptedException {
+    TaskInfo taskInfo = null;
+    boolean isTempFolderExists = false;
+    String localTaskDir = null;
+    TTClient ttClient = null;
+    TaskID tID = null;
+    FileStatus filesStatus [] = null;
+    Path inputDir = new Path("input");
+    Path outputDir = new Path("output");
+    Configuration conf = new Configuration(cluster.getConf());
+    JobConf jconf = new JobConf(conf);
+    jconf.setJobName("Word Count");
+    jconf.setJarByClass(WordCount.class);
+    jconf.setMapperClass(WordCount.MapClass.class);
+    jconf.setCombinerClass(WordCount.Reduce.class);
+    jconf.setReducerClass(WordCount.Reduce.class);
+    jconf.setNumMapTasks(1);
+    jconf.setNumReduceTasks(1);
+    jconf.setMaxMapAttempts(20);
+    jconf.setMaxReduceAttempts(20);
+    jconf.setOutputKeyClass(Text.class);
+    jconf.setOutputValueClass(IntWritable.class);
+
+    cleanup(inputDir, conf);
+    cleanup(outputDir, conf);
+    createInput(inputDir, conf);
+    FileInputFormat.setInputPaths(jconf, inputDir);
+    FileOutputFormat.setOutputPath(jconf, outputDir);
+    RunningJob runJob = jobClient.submitJob(jconf);
+    JobID id = runJob.getID();
+    JobInfo jInfo = remoteJTClient.getJobInfo(id);
+    int counter = 0;
+    while (counter < 60) {
+      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
+        break;
+      } else {
+        UtilsForTests.waitFor(1000);
+        jInfo = remoteJTClient.getJobInfo(id);
+      }
+      counter ++;
+    }
+    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);
+
+    JobStatus[] jobStatus = jobClient.getAllJobs();
+    String userName = jobStatus[0].getUsername();
+    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
+    for (TaskInfo taskinfo : taskInfos) {
+      if (!taskinfo.isSetupOrCleanup()) {
+        taskInfo = taskinfo;
+        break;
+      }
+    }
+
+    counter = 0;
+    while (counter < 30) {
+      if (taskInfo.getTaskStatus().length > 0) {
+        if (taskInfo.getTaskStatus()[0].getRunState() 
+                == TaskStatus.State.RUNNING) {
+          break;
+        }
+      }
+      UtilsForTests.waitFor(1000);
+      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
+      counter ++;
+    }
+    Assert.assertTrue("Task has not been started for 30 sec.", 
+            counter != 30);
+
+    tID = TaskID.downgrade(taskInfo.getTaskID());
+    FinishTaskControlAction action = new FinishTaskControlAction(tID);
+
+    String[] taskTrackers = taskInfo.getTaskTrackers();
+    counter = 0;
+    while (counter < 30) {
+      if (taskTrackers.length != 0) {
+        break;
+      }
+      UtilsForTests.waitFor(100);
+      taskTrackers = taskInfo.getTaskTrackers();
+      counter ++;
+    }
+
+    String hostName = taskTrackers[0].split("_")[1];
+    hostName = hostName.split(":")[0];
+    ttClient = cluster.getTTClient(hostName);
+    ttClient.getProxy().sendAction(action);
+    String localDirs[] = ttClient.getMapredLocalDirs();
+    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
+    for (String localDir : localDirs) {
+      localTaskDir = localDir + "/" 
+              + TaskTracker.getLocalTaskDir(userName, 
+                      id.toString(), taskAttID.toString());
+      filesStatus = ttClient.listStatus(localTaskDir, true);
+      if (filesStatus.length > 0) {
+        isTempFolderExists = true;
+        NetworkedJob networkJob = jobClient.new NetworkedJob(jInfo.getStatus());
+        networkJob.killTask(taskAttID, false);
+        break;
+      }
+    }
+
+    Assert.assertTrue("Task Attempt directory " + 
+            taskAttID + " has not been found while task was running.", 
+                    isTempFolderExists);
+    taskInfo = remoteJTClient.getTaskInfo(tID);
+
+    counter = 0;
+    while (counter < 60) {
+      UtilsForTests.waitFor(1000);
+      taskInfo = remoteJTClient.getTaskInfo(tID);
+      filesStatus = ttClient.listStatus(localTaskDir, true);
+      if (filesStatus.length == 0) {
+        break;
+      }
+      counter ++;
+    }
+
+    Assert.assertTrue("Task attempt temporary folder has not been cleaned.", 
+            isTempFolderExists && filesStatus.length == 0);
+    counter = 0;
+    while (counter < 30) {
+      UtilsForTests.waitFor(1000);
+      taskInfo = remoteJTClient.getTaskInfo(tID);
+      counter ++;
+    }
+    taskInfo = remoteJTClient.getTaskInfo(tID);
+    Assert.assertEquals("Task status has not been changed to KILLED.", 
+            TaskStatus.State.KILLED, 
+                    taskInfo.getTaskStatus()[0].getRunState());
+  }
+
+  private void cleanup(Path dir, Configuration conf) throws 
+          IOException {
+    FileSystem fs = dir.getFileSystem(conf);
+    fs.delete(dir, true);
+  }
+
+  private void createInput(Path inDir, Configuration conf) throws 
+          IOException {
+    String input = "Hadoop is framework for data intensive distributed " 
+            + "applications.\n" 
+            + "Hadoop enables applications to work with thousands of nodes.";
+    FileSystem fs = inDir.getFileSystem(conf);
+    if (!fs.mkdirs(inDir)) {
+      throw new IOException("Failed to create the input directory:" 
+            + inDir.toString());
+    }
+    fs.setPermission(inDir, new FsPermission(FsAction.ALL, 
+            FsAction.ALL, FsAction.ALL));
+    DataOutputStream file = fs.create(new Path(inDir, "data.txt"));
+    int i = 0;
+    while(i < 1000 * 3000) {
+      file.writeBytes(input);
+      i++;
+    }
+    file.close();
+  }
+
+  /**
+   * Verifying whether task temporary output directory is cleaned up or not
+   * after failing the task.
+   */
+  @Test
+  public void testDirCleanupAfterTaskFailed() throws IOException, 
+          InterruptedException {
+    TTClient ttClient = null;
+    FileStatus filesStatus [] = null;
+    String localTaskDir = null;
+    TaskInfo taskInfo = null;
+    TaskID tID = null;
+    boolean isTempFolderExists = false;
+    Path inputDir = new Path("input");
+    Path outputDir = new Path("output");
+    Configuration conf = new Configuration(cluster.getConf());
+    JobConf jconf = new JobConf(conf);
+    jconf.setJobName("Task Failed job");
+    jconf.setJarByClass(UtilsForTests.class);
+    jconf.setMapperClass(FailedMapperClass.class);
+    jconf.setNumMapTasks(1);
+    jconf.setNumReduceTasks(0);
+    jconf.setMaxMapAttempts(1);
+    cleanup(inputDir, conf);
+    cleanup(outputDir, conf);
+    createInput(inputDir, conf);
+    FileInputFormat.setInputPaths(jconf, inputDir);
+    FileOutputFormat.setOutputPath(jconf, outputDir);
+    RunningJob runJob = jobClient.submitJob(jconf);
+    JobID id = runJob.getID();
+    JobInfo jInfo = remoteJTClient.getJobInfo(id);
+    
+    int counter = 0;
+    while (counter < 60) {
+      if (jInfo.getStatus().getRunState() == JobStatus.RUNNING) {
+        break;
+      } else {
+        UtilsForTests.waitFor(1000);
+        jInfo = remoteJTClient.getJobInfo(id);
+      }
+      counter ++;
+    }
+    Assert.assertTrue("Job has not been started for 1 min.", counter != 60);
+
+    JobStatus[] jobStatus = jobClient.getAllJobs();
+    String userName = jobStatus[0].getUsername();
+    TaskInfo[] taskInfos = remoteJTClient.getTaskInfo(id);
+    for (TaskInfo taskinfo : taskInfos) {
+      if (!taskinfo.isSetupOrCleanup()) {
+        taskInfo = taskinfo;
+        break;
+      }
+    }
+
+    tID = TaskID.downgrade(taskInfo.getTaskID());
+    FinishTaskControlAction action = new FinishTaskControlAction(tID);
+    String[] taskTrackers = taskInfo.getTaskTrackers();
+    counter = 0;
+    while (counter < 30) {
+      if (taskTrackers.length != 0) {
+        break;
+      }
+      UtilsForTests.waitFor(1000);
+      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
+      taskTrackers = taskInfo.getTaskTrackers();
+      counter ++;
+    }
+    Assert.assertTrue("Task tracker not found.", taskTrackers.length != 0);
+    String hostName = taskTrackers[0].split("_")[1];
+    hostName = hostName.split(":")[0];
+    ttClient = cluster.getTTClient(hostName);
+    ttClient.getProxy().sendAction(action);
+
+    counter = 0;
+    while(counter < 60) {
+      if (taskInfo.getTaskStatus().length > 0) {
+        if (taskInfo.getTaskStatus()[0].getRunState() 
+                == TaskStatus.State.RUNNING) {
+          break;
+        }
+      }
+      UtilsForTests.waitFor(1000);
+      taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
+      counter ++;
+    }
+    Assert.assertTrue("Task has not been started for 1 min.", 
+            counter != 60);
+
+    String localDirs[] = ttClient.getMapredLocalDirs();
+    TaskAttemptID taskAttID = new TaskAttemptID(tID, 0);
+    for (String localDir : localDirs) {
+      localTaskDir = localDir + "/" 
+              + TaskTracker.getLocalTaskDir(userName, 
+                      id.toString(), taskAttID.toString());
+      filesStatus = ttClient.listStatus(localTaskDir, true);
+      if (filesStatus.length > 0) {
+        isTempFolderExists = true;
+        break;
+      }
+    }
+
+    taskInfo = remoteJTClient.getTaskInfo(taskInfo.getTaskID());
+    Assert.assertTrue("Task Attempt directory " + 
+            taskAttID + " has not been found while task was running.", 
+                    isTempFolderExists);
+    counter = 0;
+    while (counter < 30) {
+      UtilsForTests.waitFor(1000);
+      taskInfo = remoteJTClient.getTaskInfo(tID);
+      counter ++;
+    }
+
+    Assert.assertEquals("Task status has not been changed to FAILED.", 
+            taskInfo.getTaskStatus()[0].getRunState(), 
+                    TaskStatus.State.FAILED);
+
+    filesStatus = ttClient.listStatus(localTaskDir, true);
+    Assert.assertTrue("Temporary folder has not been cleanup.", 
+            filesStatus.length == 0);
+  }
+
+  public static class FailedMapperClass implements 
+          Mapper<NullWritable, NullWritable, NullWritable, NullWritable> {
+    public void configure(JobConf job) {
+    }
+    public void map(NullWritable key, NullWritable value, 
+            OutputCollector<NullWritable, NullWritable> output, 
+                    Reporter reporter) throws IOException {
+      int counter = 0;
+      while (counter < 240) {
+        UtilsForTests.waitFor(1000);
+        counter ++;
+      }
+      if (counter == 240) {
+        throw new IOException();
+      }
+    }
+    public void close() {
+    }
+  }
+  
+  /**
+   * Verifies that a job fails after all the task attempts of one of
+   * its tasks are killed.
+   */
+  @Test
+  public void testAllTaskAttemptKill() throws Exception {
+    Configuration conf = new Configuration(cluster.getConf());
+
+    JobStatus[] jobStatus = null;
+
+    SleepJob job = new SleepJob();
+    job.setConf(conf);
+    conf = job.setupJobConf(3, 1, 40000, 1000, 100, 100);
+    JobConf jconf = new JobConf(conf);
+
+    //Submitting the job
+    RunningJob rJob = cluster.getJTClient().getClient().submitJob(jconf);
+
+    int MAX_MAP_TASK_ATTEMPTS = Integer.
+        parseInt(jconf.get("mapred.map.max.attempts"));
+
+    LOG.info("MAX_MAP_TASK_ATTEMPTS is : " + MAX_MAP_TASK_ATTEMPTS);
+
+    Assert.assertTrue(MAX_MAP_TASK_ATTEMPTS > 0);
+
+    TTClient tClient = null;
+    TTClient[] ttClients = null;
+
+    JobInfo jInfo = remoteJTClient.getJobInfo(rJob.getID());
+
+    //Make sure the job's run state is available
+    Assert.assertNotNull(jInfo.getStatus().getRunState());
+
+    //Wait for the job to start running.
+    while (jInfo.getStatus().getRunState() != JobStatus.RUNNING) {
+      try {
+        Thread.sleep(10000);
+      } catch (InterruptedException e) {};
+      jInfo = remoteJTClient.getJobInfo(rJob.getID());
+    }
+
+    //Temporarily store the jobid to use it later for comparison.
+    JobID jobidStore = rJob.getID();
+    jobidStore = JobID.downgrade(jobidStore);
+    LOG.info("job id is :" + jobidStore.toString());
+
+    TaskInfo[] taskInfos = null;
+
+    //After making sure that the job is running,
+    //the test execution has to make sure that
+    //at least one task has started running before continuing.
+    boolean runningCount = false;
+    int count = 0;
+    do {
+      taskInfos = cluster.getJTClient().getProxy()
+        .getTaskInfo(rJob.getID());
+      runningCount = false;
+      for (TaskInfo taskInfo : taskInfos) {
+        TaskStatus[] taskStatuses = taskInfo.getTaskStatus();
+        if (taskStatuses.length > 0){
+          LOG.info("taskStatuses[0].getRunState() is :" +
+            taskStatuses[0].getRunState());
+          if (taskStatuses[0].getRunState() == TaskStatus.State.RUNNING){
+            runningCount = true;
+            break;
+          } else {
+            LOG.info("Sleeping 5 seconds");
+            Thread.sleep(5000);
+          }
+        }
+      }
+      count++;
+      //If the count goes beyond a point, then break. This is to avoid an
+      //infinite loop under unforeseen circumstances. The testcase will
+      //fail later anyway.
+      if (count > 10) {
+        Assert.fail("Since the sleep count has reached beyond a point, " +
+          "failing at this point");
+      } 
+    } while (!runningCount);
+
+    //This whole block is about getting the task attempt id
+    //of one task and killing it MAX_MAP_TASK_ATTEMPTS times,
+    //whenever it re-attempts to run.
+    String taskIdKilled = null;
+    for (int i = 0 ; i<MAX_MAP_TASK_ATTEMPTS; i++) {
+      taskInfos = cluster.getJTClient().getProxy()
+          .getTaskInfo(rJob.getID());
+
+      for (TaskInfo taskInfo : taskInfos) {
+        TaskAttemptID taskAttemptID;
+        if (!taskInfo.isSetupOrCleanup()) {
+          //This is the task which is going to be killed continuously in
+          //all its task attempts. The first such task gets picked up.
+          TaskID taskid = TaskID.downgrade(taskInfo.getTaskID());
+          LOG.info("taskid is :" + taskid);
+          if (i==0) {
+            taskIdKilled = taskid.toString();
+            taskAttemptID = new TaskAttemptID(taskid, i);
+            LOG.info("taskAttemptid going to be killed is : " + taskAttemptID);
+            (jobClient.new NetworkedJob(jInfo.getStatus())).
+                killTask(taskAttemptID,true);
+            checkTaskCompletionEvent(taskAttemptID, jInfo);
+            break;
+          } else {
+            if (taskIdKilled.equals(taskid.toString())) {
+              taskAttemptID = new TaskAttemptID(taskid, i);
+              LOG.info("taskAttemptid going to be killed is : " +
+                  taskAttemptID);
+              (jobClient.new NetworkedJob(jInfo.getStatus())).
+                  killTask(taskAttemptID,true);
+              checkTaskCompletionEvent(taskAttemptID,jInfo);
+              break;
+            }
+          }
+        }
+      }
+    }
+    //Making sure that the job is complete.
+    while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
+      Thread.sleep(10000);
+      jInfo = remoteJTClient.getJobInfo(rJob.getID());
+    }
+
+    //Making sure that the correct job status is obtained from all the jobs
+    jobStatus = jobClient.getAllJobs();
+    JobStatus jobStatusFound = null;
+    for (JobStatus jobStatusTmp : jobStatus) {
+      if (JobID.downgrade(jobStatusTmp.getJobID()).equals(jobidStore)) {
+        jobStatusFound = jobStatusTmp;
+        LOG.info("jobStatus found is :" + jobStatusFound.getJobId().toString());
+      }
+    }
+
+    //Making sure that the job has FAILED
+    Assert.assertEquals("The job should have failed at this stage",
+        JobStatus.FAILED,jobStatusFound.getRunState());
+  }
+
+  //This method checks if the task attempt id occurs in the list
+  //of tasks that are completed (killed) for a job. This is
+  //required because after issuing a kill command, the task
+  //has to be killed and appear in the taskCompletion event.
+  //After this a new task attempt will start running in a
+  //matter of a few seconds.
+  public void checkTaskCompletionEvent (TaskAttemptID taskAttemptID,
+      JobInfo jInfo) throws Exception {
+    boolean match = false;
+    int count = 0;
+    while (!match) {
+      TaskCompletionEvent[] taskCompletionEvents =  jobClient.new
+        NetworkedJob(jInfo.getStatus()).getTaskCompletionEvents(0);
+      for (TaskCompletionEvent taskCompletionEvent : taskCompletionEvents) {
+        if ((taskCompletionEvent.getTaskAttemptId().toString()).
+            equals(taskAttemptID.toString())){
+          match = true;
+          //Sleeping for 10 seconds giving time for the next task
+          //attempt to run
+          Thread.sleep(10000);
+          break;
+        }
+      }
+      if (!match) {
+        LOG.info("Thread is sleeping for 10 seconds");
+        Thread.sleep(10000);
+        count++;
+      }
+      //If the count goes beyond a point, then break. This is to avoid an
+      //infinite loop under unforeseen circumstances. The testcase will
+      //fail later anyway.
+      if (count > 10) {
+        Assert.fail("Since the task attemptid is not appearing in the" +
+            "TaskCompletionEvent, it seems this task attempt was not killed");
+      }
+    }
+  }
+}

+ 9 - 26
src/test/system/java/org/apache/hadoop/mapred/TestTaskOwner.java

@@ -19,40 +19,22 @@
 package org.apache.hadoop.mapred;
 
 import java.io.BufferedReader;
-import java.io.IOException;
 import java.io.InputStreamReader;
-import java.util.Collection;
-import java.util.Iterator;
 import java.util.StringTokenizer;
 
-import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-
-import org.apache.hadoop.examples.SleepJob;
-import org.apache.hadoop.examples.WordCount.IntSumReducer;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.IntWritable;
-import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapreduce.Job;
 import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.mapreduce.Job;
-import org.apache.hadoop.mapreduce.Mapper;
-import org.apache.hadoop.mapreduce.OutputFormat;
-import org.apache.hadoop.mapreduce.Reducer;
-import org.apache.hadoop.mapred.TextOutputFormat;
-
-import org.apache.hadoop.mapreduce.test.system.JTClient;
 import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.mapreduce.test.system.TTClient;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.io.Text;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
@@ -73,7 +55,11 @@ public class TestTaskOwner {
     cluster = MRCluster.createCluster(new Configuration());
     cluster.setUp();
     FileSystem fs = inDir.getFileSystem(cluster.getJTClient().getConf());
-    fs.create(inDir);
+    // Make sure that all is clean in case last tearDown wasn't successful
+    fs.delete(outDir, true);
+    fs.delete(inDir, true);
+
+    fs.create(inDir, true);
   }
 
   @Test
@@ -104,7 +90,6 @@ public class TestTaskOwner {
     // as the
     // user name that was used to launch the task in the first place
     FileSystem fs = outDir.getFileSystem(conf);
-    StringBuffer result = new StringBuffer();
 
     Path[] fileList = FileUtil.stat2Paths(fs.listStatus(outDir,
      new Utils.OutputFileUtils.OutputFilesFilter()));
@@ -128,20 +113,18 @@ public class TestTaskOwner {
               .toString());
            break;
          }
-
         }
         file.close();
      }
-
   }
 
   @AfterClass
   public static void tearDown() throws java.lang.Exception {
     FileSystem fs = outDir.getFileSystem(cluster.getJTClient().getConf());
     fs.delete(outDir, true);
+    fs.delete(inDir, true);
     cluster.tearDown();
    }
-
 }
 
 

+ 17 - 5
src/test/system/java/org/apache/hadoop/mapreduce/test/system/JTProtocol.java

@@ -32,30 +32,42 @@ public interface JTProtocol extends DaemonProtocol {
 
   /**
    * Get the information pertaining to given job.<br/>
+   * The returned JobInfo object can be null when the job
+   * specified by the job id has been retired from the 
+   * JobTracker's memory, which happens after the job 
+   * is completed. <br/>
    * 
    * @param id
    *          of the job for which information is required.
-   * @return information of regarding job.
+   * @return information regarding the job, or null if the job has 
+   *         been retired from the JobTracker's memory.
    * @throws IOException
    */
   public JobInfo getJobInfo(JobID jobID) throws IOException;
 
   /**
    * Gets the information pertaining to a task. <br/>
-   * 
+   * The returned TaskInfo object can be null when the 
+   * task specified by the task id has been retired from
+   * the JobTracker's memory, which happens after the
+   * job is completed. <br/>
    * @param id
    *          of the task for which information is required.
-   * @return information of regarding the task.
+   * @return information regarding the task, or null if the 
+   *          task has been retired from the JobTracker's memory.
    * @throws IOException
    */
   public TaskInfo getTaskInfo(TaskID taskID) throws IOException;
 
   /**
    * Gets the information pertaining to a given TaskTracker. <br/>
-   * 
+   * The returned TTInfo object can be null if the given TaskTracker's
+   * information has been removed from the JobTracker's memory, which is
+   * done when the TaskTracker is marked lost by the JobTracker. <br/>
    * @param name
    *          of the tracker.
-   * @return information regarding the tracker.
+   * @return information regarding the tracker, or null if the TaskTracker
+   *          has been marked lost by the JobTracker.
    * @throws IOException
    */
   public TTInfo getTTInfo(String trackerName) throws IOException;

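The retirement semantics documented above matter to callers that poll the JobTracker around job completion. A minimal, illustrative polling loop that tolerates a null return, written against the same helpers the tests above use (a JTProtocol proxy named remoteJTClient and UtilsForTests.waitFor); this is an editorial sketch, not part of the patch:

    JobInfo jInfo = remoteJTClient.getJobInfo(id);
    while (jInfo != null && !jInfo.getStatus().isJobComplete()) {
      UtilsForTests.waitFor(1000);
      // may become null once the JobTracker retires the completed job
      jInfo = remoteJTClient.getJobInfo(id);
    }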
+ 33 - 19
src/test/system/java/org/apache/hadoop/mapreduce/test/system/MRCluster.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.mapreduce.test.system;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
@@ -30,7 +31,9 @@ import org.apache.hadoop.test.system.AbstractDaemonClient;
 import org.apache.hadoop.test.system.AbstractDaemonCluster;
 import org.apache.hadoop.test.system.process.ClusterProcessManager;
 import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;
+import org.apache.hadoop.test.system.process.MultiUserHadoopDaemonRemoteCluster;
 import org.apache.hadoop.test.system.process.RemoteProcess;
+import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
 
 /**
  * Concrete AbstractDaemonCluster representing a Map-Reduce cluster.
@@ -43,22 +46,24 @@ public class MRCluster extends AbstractDaemonCluster {
   public static final String CLUSTER_PROCESS_MGR_IMPL = 
     "test.system.mr.clusterprocess.impl.class";
 
-  /**
-   * Key is used to to point to the file containing hostname of the jobtracker
-   */
-  public static final String CONF_HADOOP_JT_HOSTFILE_NAME =
-    "test.system.hdrc.jt.hostfile";
   /**
    * Key is used to to point to the file containing hostnames of tasktrackers
    */
   public static final String CONF_HADOOP_TT_HOSTFILE_NAME =
     "test.system.hdrc.tt.hostfile";
 
-  private static String JT_hostFileName;
+  private static List<HadoopDaemonInfo> mrDaemonInfos = 
+    new ArrayList<HadoopDaemonInfo>();
   private static String TT_hostFileName;
+  private static String jtHostName;
 
   protected enum Role {JT, TT};
-  
+
+  static{
+    Configuration.addDefaultResource("mapred-default.xml");
+    Configuration.addDefaultResource("mapred-site.xml");
+  }
+
   private MRCluster(Configuration conf, ClusterProcessManager rCluster)
       throws IOException {
     super(conf, rCluster);
@@ -74,14 +79,20 @@ public class MRCluster extends AbstractDaemonCluster {
    */
   public static MRCluster createCluster(Configuration conf) 
       throws Exception {
-    JT_hostFileName = conf.get(CONF_HADOOP_JT_HOSTFILE_NAME,
-      System.getProperty(CONF_HADOOP_JT_HOSTFILE_NAME,
-        "clusterControl.masters.jt"));
-    TT_hostFileName = conf.get(CONF_HADOOP_TT_HOSTFILE_NAME,
-      System.getProperty(CONF_HADOOP_TT_HOSTFILE_NAME, "slaves"));
-
-    String implKlass = conf.get(CLUSTER_PROCESS_MGR_IMPL, System
-        .getProperty(CLUSTER_PROCESS_MGR_IMPL));
+    conf.addResource("system-test.xml");
+    TT_hostFileName = conf.get(CONF_HADOOP_TT_HOSTFILE_NAME, "slaves");
+    String jtHostPort = conf.get("mapred.job.tracker");
+    if (jtHostPort == null) {
+      throw new Exception("mapred.job.tracker is not set.");
+    }
+    jtHostName = jtHostPort.trim().split(":")[0];
+    
+    mrDaemonInfos.add(new HadoopDaemonInfo("jobtracker", 
+        Role.JT, Arrays.asList(new String[]{jtHostName})));
+    mrDaemonInfos.add(new HadoopDaemonInfo("tasktracker", 
+        Role.TT, TT_hostFileName));
+    
+    String implKlass = conf.get(CLUSTER_PROCESS_MGR_IMPL);
     if (implKlass == null || implKlass.isEmpty()) {
       implKlass = MRProcessManager.class.getName();
     }
@@ -145,12 +156,15 @@ public class MRCluster extends AbstractDaemonCluster {
   }
 
   public static class MRProcessManager extends HadoopDaemonRemoteCluster{
-    private static final List<HadoopDaemonInfo> mrDaemonInfos = 
-      Arrays.asList(new HadoopDaemonInfo[]{
-          new HadoopDaemonInfo("jobtracker", Role.JT, JT_hostFileName),
-          new HadoopDaemonInfo("tasktracker", Role.TT, TT_hostFileName)});
     public MRProcessManager() {
       super(mrDaemonInfos);
     }
   }
+
+  public static class MultiMRProcessManager
+      extends MultiUserHadoopDaemonRemoteCluster {
+    public MultiMRProcessManager() {
+      super(mrDaemonInfos);
+    }
+  }
 }

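With the JT host-file key removed, createCluster now derives the JobTracker host from mapred.job.tracker and throws if it is unset. A small sketch of the new entry point; the host:port value below is hypothetical and would normally come from mapred-site.xml or system-test.xml rather than being set in code:

    Configuration conf = new Configuration();
    // hypothetical value, normally supplied by mapred-site.xml / system-test.xml
    conf.set("mapred.job.tracker", "jt-host.example.com:9001");
    MRCluster cluster = MRCluster.createCluster(conf);
    cluster.setUp();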
+ 28 - 8
src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTProtocol.java

@@ -18,17 +18,26 @@
 
 package org.apache.hadoop.mapreduce.test.system;
 
-import java.io.IOException;
-
 import org.apache.hadoop.mapred.JobTracker;
 import org.apache.hadoop.mapred.TaskTracker;
 import org.apache.hadoop.mapred.TaskTrackerStatus;
 import org.apache.hadoop.mapreduce.TaskID;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSelector;
+import org.apache.hadoop.security.KerberosInfo;
+import org.apache.hadoop.security.token.TokenInfo;
 import org.apache.hadoop.test.system.DaemonProtocol;
 
+import java.io.IOException;
+
 /**
  * TaskTracker RPC interface to be used for cluster tests.
+ *
+ * The protocol has to be annotated so KerberosInfo can be filled in during
+ * creation of an ipc.Client connection.
  */
+@KerberosInfo(
+    serverPrincipal = TaskTracker.TT_USER_NAME)
+@TokenInfo(JobTokenSelector.class)
 public interface TTProtocol extends DaemonProtocol {
 
   public static final long versionID = 1L;
@@ -36,8 +45,8 @@ public interface TTProtocol extends DaemonProtocol {
    * Gets latest status which was sent in heartbeat to the {@link JobTracker}. 
    * <br/>
    * 
-   * @return status
-   * @throws IOException
+   * @return status of the TaskTracker daemon
+   * @throws IOException in case of errors
    */
   TaskTrackerStatus getStatus() throws IOException;
 
@@ -45,17 +54,28 @@ public interface TTProtocol extends DaemonProtocol {
    * Gets list of all the tasks in the {@link TaskTracker}.<br/>
    * 
    * @return list of all the tasks
-   * @throws IOException
+   * @throws IOException in case of errors
    */
   TTTaskInfo[] getTasks() throws IOException;
 
   /**
    * Gets the task associated with the id.<br/>
    * 
-   * @param id of the task.
+   * @param taskID of the task.
    * 
-   * @return
-   * @throws IOException
+   * @return task info <code>TTTaskInfo</code>
+   * @throws IOException in case of errors
    */
   TTTaskInfo getTask(TaskID taskID) throws IOException;
+
+  /**
+   * Checks whether any process in the process tree of the task is
+   * still alive. <br/>
+   * 
+   * @param pid
+   *          of the task attempt
+   * @return true if task process tree is alive.
+   * @throws IOException in case of errors
+   */
+  boolean isProcessTreeAlive(String pid) throws IOException;
 }

+ 8 - 1
src/test/system/java/org/apache/hadoop/mapreduce/test/system/TTTaskInfo.java

@@ -68,4 +68,11 @@ public interface TTTaskInfo extends Writable {
    * @return true if it is a clean up of task.
    */
   boolean isTaskCleanupTask();
-}
+
+  /**
+   * Gets the pid of the running task on the task-tracker.
+   * 
+   * @return pid of the task.
+   */
+  String getPid();
+}

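Together, the new TTProtocol.isProcessTreeAlive() and TTTaskInfo.getPid() let a test check that a task attempt's process tree is gone after a kill. A rough sketch, assuming a TTClient obtained as in the tests above and that its getProxy() exposes the TTProtocol interface shown earlier (an editorial illustration, not part of the patch):

    TTTaskInfo ttTaskInfo = ttClient.getProxy().getTask(taskID);
    String pid = ttTaskInfo.getPid();
    // expected to turn false once the task attempt has been killed
    boolean alive = ttClient.getProxy().isProcessTreeAlive(pid);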
+ 37 - 30
src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java

@@ -22,9 +22,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.ConcurrentModificationException;
 import java.util.List;
-
 import junit.framework.Assert;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -224,67 +222,73 @@ public abstract class AbstractDaemonClient<PROXY extends DaemonProtocol> {
    * Gets number of times FATAL log messages where logged in Daemon logs. 
    * <br/>
    * Pattern used for searching is FATAL. <br/>
-   * 
+   * @param excludeExpList list of exceptions to exclude 
    * @return number of occurrence of fatal message.
    * @throws IOException
    */
-  public int getNumberOfFatalStatementsInLog() throws IOException {
+  public int getNumberOfFatalStatementsInLog(String [] excludeExpList)
+      throws IOException {
     DaemonProtocol proxy = getProxy();
     String pattern = "FATAL";
-    return proxy.getNumberOfMatchesInLogFile(pattern);
+    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
   }
 
   /**
    * Gets number of times ERROR log messages where logged in Daemon logs. 
    * <br/>
    * Pattern used for searching is ERROR. <br/>
-   * 
+   * @param excludeExpList list of exceptions to exclude 
    * @return number of occurrence of error message.
    * @throws IOException
    */
-  public int getNumberOfErrorStatementsInLog() throws IOException {
+  public int getNumberOfErrorStatementsInLog(String[] excludeExpList) 
+      throws IOException {
     DaemonProtocol proxy = getProxy();
-    String pattern = "ERROR";
-    return proxy.getNumberOfMatchesInLogFile(pattern);
+    String pattern = "ERROR";    
+    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
   }
 
   /**
    * Gets number of times Warning log messages where logged in Daemon logs. 
    * <br/>
    * Pattern used for searching is WARN. <br/>
-   * 
+   * @param excludeExpList list of exceptions to exclude 
    * @return number of occurrence of warning message.
    * @throws IOException
    */
-  public int getNumberOfWarnStatementsInLog() throws IOException {
+  public int getNumberOfWarnStatementsInLog(String[] excludeExpList) 
+      throws IOException {
     DaemonProtocol proxy = getProxy();
     String pattern = "WARN";
-    return proxy.getNumberOfMatchesInLogFile(pattern);
+    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
   }
 
   /**
    * Gets number of time given Exception were present in log file. <br/>
    * 
    * @param e exception class.
+   * @param excludeExpList list of exceptions to exclude. 
    * @return number of exceptions in log
    * @throws IOException
    */
-  public int getNumberOfExceptionsInLog(Exception e)
-      throws IOException {
+  public int getNumberOfExceptionsInLog(Exception e,
+      String[] excludeExpList) throws IOException {
     DaemonProtocol proxy = getProxy();
-    String pattern = e.getClass().getSimpleName();
-    return proxy.getNumberOfMatchesInLogFile(pattern);
+    String pattern = e.getClass().getSimpleName();    
+    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
   }
 
   /**
    * Number of times ConcurrentModificationException present in log file. 
    * <br/>
+   * @param excludeExpList list of exceptions to exclude.
    * @return number of times exception in log file.
    * @throws IOException
    */
-  public int getNumberOfConcurrentModificationExceptionsInLog()
-      throws IOException {
-    return getNumberOfExceptionsInLog(new ConcurrentModificationException());
+  public int getNumberOfConcurrentModificationExceptionsInLog(
+      String[] excludeExpList) throws IOException {
+    return getNumberOfExceptionsInLog(new ConcurrentModificationException(),
+        excludeExpList);
   }
 
   private int errorCount;
@@ -294,16 +298,17 @@ public abstract class AbstractDaemonClient<PROXY extends DaemonProtocol> {
   /**
    * Populate the initial exception counts to be used to assert once a testcase
    * is done there was no exception in the daemon when testcase was run.
-   * 
+   * @param excludeExpList list of exceptions to exclude
    * @throws IOException
    */
-  protected void populateExceptionCount() throws IOException {
-    errorCount = getNumberOfErrorStatementsInLog();
+  protected void populateExceptionCount(String [] excludeExpList) 
+      throws IOException {
+    errorCount = getNumberOfErrorStatementsInLog(excludeExpList);
     LOG.info("Number of error messages in logs : " + errorCount);
-    fatalCount = getNumberOfFatalStatementsInLog();
+    fatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
     LOG.info("Number of fatal statement in logs : " + fatalCount);
     concurrentExceptionCount =
-        getNumberOfConcurrentModificationExceptionsInLog();
+        getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
     LOG.info("Number of concurrent modification in logs : "
         + concurrentExceptionCount);
   }
@@ -314,16 +319,18 @@ public abstract class AbstractDaemonClient<PROXY extends DaemonProtocol> {
    * <b><i>
    * Pre-req for the method is that populateExceptionCount() has 
    * to be called before calling this method.</b></i>
+   * @param excludeExpList list of exceptions to exclude
    * @throws IOException
    */
-  protected void assertNoExceptionsOccurred() throws IOException {
-    int newerrorCount = getNumberOfErrorStatementsInLog();
-    LOG.info("Number of error messages while asserting : " + newerrorCount);
-    int newfatalCount = getNumberOfFatalStatementsInLog();
+  protected void assertNoExceptionsOccurred(String [] excludeExpList) 
+      throws IOException {
+    int newerrorCount = getNumberOfErrorStatementsInLog(excludeExpList);
+    LOG.info("Number of error messages while asserting :" + newerrorCount);
+    int newfatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
     LOG.info("Number of fatal messages while asserting : " + newfatalCount);
     int newconcurrentExceptionCount =
-        getNumberOfConcurrentModificationExceptionsInLog();
-    LOG.info("Number of concurrentmodification execption while asserting :"
+        getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
+    LOG.info("Number of concurrentmodification exception while asserting :"
         + newconcurrentExceptionCount);
     Assert.assertEquals(
         "New Error Messages logged in the log file", errorCount, newerrorCount);

+ 31 - 7
src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java

@@ -23,7 +23,6 @@ import java.util.ArrayList;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -37,7 +36,7 @@ import org.apache.hadoop.test.system.process.RemoteProcess;
 public abstract class AbstractDaemonCluster {
 
   private static final Log LOG = LogFactory.getLog(AbstractDaemonCluster.class);
-
+  private String[] excludeExpList;
   private Configuration conf;
   protected ClusterProcessManager clusterManager;
   private Map<Enum<?>, List<AbstractDaemonClient>> daemons = 
@@ -60,6 +59,17 @@ public abstract class AbstractDaemonCluster {
     createAllClients();
   }
 
+  /**
+   * Returns the cluster manager. The system test cases require an
+   * instance of HadoopDaemonRemoteCluster to invoke certain operations
+   * on the daemons.
+   * 
+   * @return instance of clusterManager
+   */
+  public ClusterProcessManager getClusterManager() {
+    return clusterManager;
+  }
+
   protected void createAllClients() throws IOException {
     for (RemoteProcess p : clusterManager.getAllProcesses()) {
       List<AbstractDaemonClient> dms = daemons.get(p.getRole());
@@ -131,14 +141,18 @@ public abstract class AbstractDaemonCluster {
   }
 
   protected void waitForDaemon(AbstractDaemonClient d) {
+    final int TEN_SEC = 10000;
     while(true) {
       try {
-        LOG.info("Waiting for daemon in host to come up : " + d.getHostName());
+        LOG.info("Waiting for daemon at " + d.getHostName() + " to come up.");
+        LOG.info("Daemon might not be " +
+            "ready or the call to setReady() method hasn't been " +
+            "injected to " + d.getClass() + " ");
         d.connect();
         break;
       } catch (IOException e) {
         try {
-          Thread.sleep(10000);
+          Thread.sleep(TEN_SEC);
         } catch (InterruptedException ie) {
         }
       }
@@ -212,7 +226,17 @@ public abstract class AbstractDaemonCluster {
     ensureClean();
     populateExceptionCounts();
   }
-
+  
+  /**
+   * This is mainly used for the test cases to set the list of exceptions
+   * that will be excluded.
+   * @param excludeExpList list of exceptions to exclude
+   */
+  public void setExcludeExpList(String[] excludeExpList) {
+    this.excludeExpList = excludeExpList;
+  }
+  
   public void clearAllControlActions() throws IOException {
     for (List<AbstractDaemonClient> set : daemons.values()) {
       for (AbstractDaemonClient daemon : set) {
@@ -248,7 +272,7 @@ public abstract class AbstractDaemonCluster {
   protected void populateExceptionCounts() throws IOException {
     for(List<AbstractDaemonClient> lst : daemons.values()) {
       for(AbstractDaemonClient d : lst) {
-        d.populateExceptionCount();
+        d.populateExceptionCount(excludeExpList);
       }
     }
   }
@@ -261,7 +285,7 @@ public abstract class AbstractDaemonCluster {
   protected void assertNoExceptionMessages() throws IOException {
     for(List<AbstractDaemonClient> lst : daemons.values()) {
       for(AbstractDaemonClient d : lst) {
-        d.assertNoExceptionsOccurred();
+        d.assertNoExceptionsOccurred(excludeExpList);
       }
     }
   }

+ 25 - 15
src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java

@@ -35,7 +35,7 @@ public interface DaemonProtocol extends VersionedProtocol{
   /**
    * Returns the Daemon configuration.
    * @return Configuration
-   * @throws IOException
+   * @throws IOException in case of errors
    */
   Configuration getDaemonConf() throws IOException;
 
@@ -51,7 +51,7 @@ public interface DaemonProtocol extends VersionedProtocol{
    * Check if the Daemon is ready to accept RPC connections.
    * 
    * @return true if Daemon is ready to accept RPC connection.
-   * @throws IOException
+   * @throws IOException in case of errors
    */
   boolean isReady() throws IOException;
 
@@ -60,7 +60,7 @@ public interface DaemonProtocol extends VersionedProtocol{
    * 
    * @return returns system level view of the Daemon process.
    * 
-   * @throws IOException
+   * @throws IOException in case of errors
    */
   ProcessInfo getProcessInfo() throws IOException;
   
@@ -85,16 +85,16 @@ public interface DaemonProtocol extends VersionedProtocol{
    * @param local
    *          whether the path is local or not
    * @return the statuses of the files/directories in the given patch
-   * @throws IOException
+   * @throws IOException in case of errors
    */
   FileStatus[] listStatus(String path, boolean local) throws IOException;
   
   /**
    * Enables a particular control action to be performed on the Daemon <br/>
    * 
-   * @param control action to be enabled.
+   * @param action control action to be enabled.
    * 
-   * @throws IOException
+   * @throws IOException in case of errors
    */
   @SuppressWarnings("unchecked")
   void sendAction(ControlAction action) throws IOException;
@@ -107,7 +107,7 @@ public interface DaemonProtocol extends VersionedProtocol{
    * 
    * @return true if action is still in waiting queue of 
    *          actions to be delivered.
-   * @throws IOException
+   * @throws IOException in case of errors
    */
   @SuppressWarnings("unchecked")
   boolean isActionPending(ControlAction action) throws IOException;
@@ -117,7 +117,7 @@ public interface DaemonProtocol extends VersionedProtocol{
    * daemon maintains. <br/>
    * <i><b>Not to be directly called by Test Case or clients.</b></i>
    * @param action to be removed
-   * @throws IOException
+   * @throws IOException in case of errors
    */
   
   @SuppressWarnings("unchecked")
@@ -126,7 +126,7 @@ public interface DaemonProtocol extends VersionedProtocol{
   /**
    * Clears out the list of control actions on the particular daemon.
    * <br/>
-   * @throws IOException
+   * @throws IOException in case of errors
    */
   void clearActions() throws IOException;
   
@@ -136,7 +136,7 @@ public interface DaemonProtocol extends VersionedProtocol{
    * <i><b>Not to be directly used by clients</b></i>
    * @param key target
    * @return list of actions.
-   * @throws IOException
+   * @throws IOException in case of errors
    */
   @SuppressWarnings("unchecked")
   ControlAction[] getActions(Writable key) throws IOException;
@@ -145,11 +145,21 @@ public interface DaemonProtocol extends VersionedProtocol{
    * Gets the number of times a particular pattern has been found in the 
    * daemons log file.<br/>
    * <b><i>Please note that search spans across all previous messages of
-   * Daemon, so better practise is to get previous counts before an operation
-   * and then recheck if the sequence of action has caused any problems</i></b>
-   * @param pattern
+   * Daemon, so better practice is to get previous counts before an operation
+   * and then re-check if the sequence of action has caused any problems</i></b>
+   * @param pattern to look for in the daemon's log file
+   * @param list of exceptions to ignore
+   * @return number of times the pattern is found in the log file.
-   * @throws IOException
+   * @throws IOException in case of errors
+   */
+  int getNumberOfMatchesInLogFile(String pattern, String[] list) 
+      throws IOException;
+
+  /**
+   * Gets the user who started the particular daemon initially. <br/>
+   * 
+   * @return user who started the particular daemon.
+   * @throws IOException in case of errors
    */
-  int getNumberOfMatchesInLogFile(String pattern) throws IOException;
+  String getDaemonUser() throws IOException;
 }

+ 31 - 0
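A short usage sketch of the new exclude-list form of the log matcher; here proxy stands for any DaemonProtocol handle, for example the one returned by cluster.getJTClient().getProxy() (an editorial illustration, not part of the patch):

    String[] ignore = new String[] {"java.net.ConnectException"};
    // count ERROR lines while ignoring expected connection noise
    int errors = proxy.getNumberOfMatchesInLogFile("ERROR", ignore);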
src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java

@@ -59,11 +59,42 @@ public interface ClusterProcessManager {
    */
   void start() throws IOException;
 
+  /**
+   * Starts the daemons from the user-specified conf dir.
+   * @param newConfLocation the dir where the new conf files reside.
+   * @throws IOException
+   */
+  void start(String newConfLocation) throws IOException;
+
+  /**
+   * Stops the daemons running from the user-specified conf dir.
+   * 
+   * @param newConfLocation
+   *          the dir where the new conf files reside.
+   * @throws IOException
+   */
+  void stop(String newConfLocation) throws IOException;
+
   /**
    * Method to shutdown all the remote daemons.<br/>
    * 
    * @throws IOException if shutdown procedure fails.
    */
   void stop() throws IOException;
+  
+  /**
+   * Returns whether multi-user support is enabled for this cluster. 
+   * <br/>
+   * @return true if multi-user support is enabled.
+   * @throws IOException
+   */
+  boolean isMultiUserSupported() throws IOException;
 
+  /**
+   * pushConfig is used to push a new config to the daemons.
+   * @param localDir the local dir containing the new config files
+   * @return the remote dir location to which the config will be pushed
+   * @throws IOException
+   */
+  String pushConfig(String localDir) throws IOException;
 }

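The new methods combine into the push/restart/verify cycle that TestPushConfig above already exercises. A condensed, illustrative sketch of that contract (not part of the patch), where newDir is whatever pushConfig returns:

    String newDir = cluster.getClusterManager().pushConfig(localDir);
    cluster.getClusterManager().stop();        // stop daemons running on the old conf
    cluster.getClusterManager().start(newDir); // restart them from the pushed conf
    // ... verify behaviour under the new configuration ...
    cluster.getClusterManager().stop(newDir);
    cluster.getClusterManager().start();       // back to the original conf dir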
+ 190 - 63
src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java

@@ -56,11 +56,17 @@ public abstract class HadoopDaemonRemoteCluster
   private static final Log LOG = LogFactory
       .getLog(HadoopDaemonRemoteCluster.class.getName());
 
+  public static final String CONF_HADOOPNEWCONFDIR =
+    "test.system.hdrc.hadoopnewconfdir";
   /**
    * Key used to configure the HADOOP_HOME to be used by the
    * HadoopDaemonRemoteCluster.
    */
-  public final static String CONF_HADOOPHOME = "test.system.hdrc.hadoophome";
+  public final static String CONF_HADOOPHOME =
+    "test.system.hdrc.hadoophome";
+
+  public final static String CONF_SCRIPTDIR =
+    "test.system.hdrc.deployed.scripts.dir";
   /**
    * Key used to configure the HADOOP_CONF_DIR to be used by the
    * HadoopDaemonRemoteCluster.
@@ -72,26 +78,70 @@ public abstract class HadoopDaemonRemoteCluster
     "test.system.hdrc.deployed.hadoopconfdir";
 
   private String hadoopHome;
-  private String hadoopConfDir;
-  private String deployed_hadoopConfDir;
+  protected String hadoopConfDir;
+  protected String scriptsDir;
+  protected String hadoopNewConfDir;
   private final Set<Enum<?>> roles;
-
   private final List<HadoopDaemonInfo> daemonInfos;
   private List<RemoteProcess> processes;
-
+  protected Configuration conf;
+  
   public static class HadoopDaemonInfo {
     public final String cmd;
     public final Enum<?> role;
-    public final String hostFile;
-    public HadoopDaemonInfo(String cmd, Enum<?> role, String hostFile) {
+    public final List<String> hostNames;
+    public HadoopDaemonInfo(String cmd, Enum<?> role, List<String> hostNames) {
       super();
       this.cmd = cmd;
       this.role = role;
-      this.hostFile = hostFile;
-      LOG.info("Created HadoopDaemonInfo for " + cmd + " " + role + " from " + hostFile);
+      this.hostNames = hostNames;
+    }
+
+    public HadoopDaemonInfo(String cmd, Enum<?> role, String hostFile) 
+        throws IOException {
+      super();
+      this.cmd = cmd;
+      this.role = role;
+      File file = new File(getDeployedHadoopConfDir(), hostFile);
+      BufferedReader reader = null;
+      hostNames = new ArrayList<String>();
+      try {
+        reader = new BufferedReader(new FileReader(file));
+        String host = null;
+        while ((host = reader.readLine()) != null) {
+          if (host.trim().isEmpty() || host.startsWith("#")) {
+            // Skip empty and possible comment lines
+            continue;
+          }
+          hostNames.add(host.trim());
+        }
+        if (hostNames.size() < 1) {
+          throw new IllegalArgumentException("At least one hostname "
+              +
+            "is required to be present in file - " + hostFile);
+        }
+      } finally {
+        try {
+          reader.close();
+        } catch (IOException e) {
+          LOG.warn("Could not close reader");
+        }
+      }
+      LOG.info("Created HadoopDaemonInfo for " + cmd + " " + role + " from " 
+          + hostFile);
     }
   }
 
+  @Override
+  public String pushConfig(String localDir) throws IOException {
+    for (RemoteProcess process : processes){
+      process.pushConfig(localDir);
+    }
+    return hadoopNewConfDir;
+  }
+
   public HadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
     this.daemonInfos = daemonInfos;
     this.roles = new HashSet<Enum<?>>();
@@ -102,9 +152,10 @@ public abstract class HadoopDaemonRemoteCluster
 
   @Override
   public void init(Configuration conf) throws IOException {
+    this.conf = conf;
     populateDirectories(conf);
     this.processes = new ArrayList<RemoteProcess>();
-    populateDaemons(deployed_hadoopConfDir);
+    populateDaemons();
   }
 
   @Override
@@ -130,17 +181,10 @@ public abstract class HadoopDaemonRemoteCluster
    *           values for the required keys.
    */
   protected void populateDirectories(Configuration conf) {
-    hadoopHome = conf.get(CONF_HADOOPHOME, System
-        .getProperty(CONF_HADOOPHOME));
-    hadoopConfDir = conf.get(CONF_HADOOPCONFDIR, System
-        .getProperty(CONF_HADOOPCONFDIR));
-
-    deployed_hadoopConfDir = conf.get(CONF_DEPLOYED_HADOOPCONFDIR,
-      System.getProperty(CONF_DEPLOYED_HADOOPCONFDIR));
-    if (deployed_hadoopConfDir == null || deployed_hadoopConfDir.isEmpty()) {
-      deployed_hadoopConfDir = hadoopConfDir;
-    }
-
+    hadoopHome = conf.get(CONF_HADOOPHOME);
+    hadoopConfDir = conf.get(CONF_HADOOPCONFDIR);
+    scriptsDir = conf.get(CONF_SCRIPTDIR);
+    hadoopNewConfDir = conf.get(CONF_HADOOPNEWCONFDIR);
     if (hadoopHome == null || hadoopConfDir == null || hadoopHome.isEmpty()
         || hadoopConfDir.isEmpty()) {
       LOG.error("No configuration "
@@ -149,7 +193,17 @@ public abstract class HadoopDaemonRemoteCluster
           "No Configuration passed for hadoop home " +
           "and hadoop conf directories");
     }
+  }
 
+  public static String getDeployedHadoopConfDir() {
+    String dir = System.getProperty(CONF_DEPLOYED_HADOOPCONFDIR);
+    if (dir == null || dir.isEmpty()) {
+      LOG.error("No configuration "
+          + "for the CONF_DEPLOYED_HADOOPCONFDIR passed");
+      throw new IllegalArgumentException(
+          "No Configuration passed for hadoop deployed conf directory");
+    }
+    return dir;
   }
 
   @Override
@@ -159,6 +213,13 @@ public abstract class HadoopDaemonRemoteCluster
     }
   }
 
+  @Override
+  public void start(String newConfLocation)throws IOException {
+    for (RemoteProcess process : processes) {
+      process.start(newConfLocation);
+    }
+  }
+
   @Override
   public void stop() throws IOException {
     for (RemoteProcess process : processes) {
@@ -166,44 +227,39 @@ public abstract class HadoopDaemonRemoteCluster
     }
   }
 
-  protected void populateDaemon(String confLocation, 
-      HadoopDaemonInfo info) throws IOException {
-    File hostFile = new File(confLocation, info.hostFile);
-    BufferedReader reader = null;
-    reader = new BufferedReader(new FileReader(hostFile));
-    String host = null;
-    try {
-      boolean foundAtLeastOne = false;
-      while ((host = reader.readLine()) != null) {
-        if (host.trim().isEmpty()) {
-          throw new IllegalArgumentException(
-          "Hostname could not be found in file " + info.hostFile);
-        }
-        InetAddress addr = InetAddress.getByName(host);
-        RemoteProcess process = new ScriptDaemon(info.cmd, 
-            addr.getCanonicalHostName(), info.role);
-        processes.add(process);
-        foundAtLeastOne = true;
-      }
-      if (!foundAtLeastOne) {
-        throw new IllegalArgumentException("Alteast one hostname " +
-          "is required to be present in file - " + info.hostFile);
-      }
-    } finally {
-      try {
-        reader.close();
-      } catch (Exception e) {
-        LOG.warn("Could not close reader");
-      }
+  @Override
+  public void stop(String newConfLocation) throws IOException {
+    for (RemoteProcess process : processes) {
+      process.kill(newConfLocation);
+    }
+  }
+
+  protected void populateDaemon(HadoopDaemonInfo info) throws IOException {
+    for (String host : info.hostNames) {
+      InetAddress addr = InetAddress.getByName(host);
+      RemoteProcess process = getProcessManager(info, 
+          addr.getCanonicalHostName());
+      processes.add(process);
     }
   }
 
-  protected void populateDaemons(String confLocation) throws IOException {
+  protected void populateDaemons() throws IOException {
    for (HadoopDaemonInfo info : daemonInfos) {
-     populateDaemon(confLocation, info);
+     populateDaemon(info);
    }
   }
 
+  @Override
+  public boolean isMultiUserSupported() throws IOException {
+    return false;
+  }
+
+  protected RemoteProcess getProcessManager(
+      HadoopDaemonInfo info, String hostName) {
+    RemoteProcess process = new ScriptDaemon(info.cmd, hostName, info.role);
+    return process;
+  }
+
   /**
    * The core daemon class which actually implements the remote process
    * management of actual daemon processes in the cluster.
@@ -214,8 +270,9 @@ public abstract class HadoopDaemonRemoteCluster
     private static final String STOP_COMMAND = "stop";
     private static final String START_COMMAND = "start";
     private static final String SCRIPT_NAME = "hadoop-daemon.sh";
-    private final String daemonName;
-    private final String hostName;
+    private static final String PUSH_CONFIG = "pushConfig.sh";
+    protected final String daemonName;
+    protected final String hostName;
     private final Enum<?> role;
 
     public ScriptDaemon(String daemonName, String hostName, Enum<?> role) {
@@ -229,13 +286,57 @@ public abstract class HadoopDaemonRemoteCluster
       return hostName;
     }
 
-    private ShellCommandExecutor buildCommandExecutor(String command) {
-      String[] commandArgs = getCommand(command);
-      File binDir = getBinDir();
+    private String[] getPushConfigCommand(String localDir, String remoteDir,
+        File scriptDir) throws IOException{
+      ArrayList<String> cmdArgs = new ArrayList<String>();
+      cmdArgs.add(scriptDir.getAbsolutePath() + File.separator + PUSH_CONFIG);
+      cmdArgs.add(localDir);
+      cmdArgs.add(hostName);
+      cmdArgs.add(remoteDir);
+      cmdArgs.add(hadoopConfDir);
+      return (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
+    }
+
+    private ShellCommandExecutor buildPushConfig(String local, String remote)
+        throws IOException {
+      File scriptDir = new File(scriptsDir);
+      String[] commandArgs = getPushConfigCommand(local, remote, scriptDir);
       HashMap<String, String> env = new HashMap<String, String>();
-      env.put("HADOOP_CONF_DIR", hadoopConfDir);
       ShellCommandExecutor executor = new ShellCommandExecutor(commandArgs,
-          binDir, env);
+          scriptDir, env);
+      LOG.info(executor.toString());
+      return executor;
+    }
+
+    private ShellCommandExecutor createNewConfDir() throws IOException {
+      ArrayList<String> cmdArgs = new ArrayList<String>();
+      cmdArgs.add("ssh");
+      cmdArgs.add(hostName);
+      cmdArgs.add("if [ -d "+ hadoopNewConfDir+
+          " ];\n then echo Will remove existing directory;  rm -rf "+
+          hadoopNewConfDir+";\nmkdir "+ hadoopNewConfDir+"; else \n"+
+          "echo " + hadoopNewConfDir + " doesnt exist hence creating" +
+          ";  mkdir " + hadoopNewConfDir + ";\n  fi");
+      String[] cmd = (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
+      ShellCommandExecutor executor = new ShellCommandExecutor(cmd);
+      LOG.info(executor.toString());
+      return executor;
+    }
+
+    @Override
+    public void pushConfig(String localDir) throws IOException {
+      createNewConfDir().execute();
+      buildPushConfig(localDir, hadoopNewConfDir).execute();
+    }
+
+    private ShellCommandExecutor buildCommandExecutor(String command,
+        String confDir) {
+      String[] commandArgs = getCommand(command, confDir);
+      File cwd = new File(".");
+      HashMap<String, String> env = new HashMap<String, String>();
+      env.put("HADOOP_CONF_DIR", confDir);
+      ShellCommandExecutor executor
+        = new ShellCommandExecutor(commandArgs, cwd, env);
       LOG.info(executor.toString());
       return executor;
     }
@@ -245,14 +346,14 @@ public abstract class HadoopDaemonRemoteCluster
       return binDir;
     }
 
-    private String[] getCommand(String command) {
+    protected String[] getCommand(String command, String confDir) {
       ArrayList<String> cmdArgs = new ArrayList<String>();
       File binDir = getBinDir();
       cmdArgs.add("ssh");
       cmdArgs.add(hostName);
       cmdArgs.add(binDir.getAbsolutePath() + File.separator + SCRIPT_NAME);
       cmdArgs.add("--config");
-      cmdArgs.add(hadoopConfDir);
+      cmdArgs.add(confDir);
       // XXX Twenty internal version does not support --script option.
       cmdArgs.add(command);
       cmdArgs.add(daemonName);
@@ -261,12 +362,38 @@ public abstract class HadoopDaemonRemoteCluster
 
     @Override
     public void kill() throws IOException {
-      buildCommandExecutor(STOP_COMMAND).execute();
+      kill(hadoopConfDir);
     }
 
     @Override
     public void start() throws IOException {
-      buildCommandExecutor(START_COMMAND).execute();
+      start(hadoopConfDir);
+    }
+
+    public void start(String newConfLocation) throws IOException {
+      ShellCommandExecutor cme = buildCommandExecutor(START_COMMAND,
+          newConfLocation);
+      cme.execute();
+      String output = cme.getOutput();
+      if (!output.isEmpty()) { // getOutput() never returns null
+        if (output.toLowerCase().contains("error")) {
+          LOG.warn("Error detected in daemon start output.");
+          throw new IOException("Start error\n" + output);
+        }
+      }
+    }
+
+    public void kill(String newConfLocation) throws IOException {
+      ShellCommandExecutor cme
+        = buildCommandExecutor(STOP_COMMAND, newConfLocation);
+      cme.execute();
+      String output = cme.getOutput();
+      if (!output.isEmpty()) { // getOutput() never returns null
+        if (output.toLowerCase().contains("error")) {
+          LOG.warn("Error detected in daemon stop output.");
+          throw new IOException("Kill error\n" + output);
+        }
+      }
     }
 
     @Override

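The pushConfig/start(newConfLocation)/stop(newConfLocation) additions above are meant to be driven together. Below is a minimal sketch of the expected call sequence from a system test; the helper class name, the concrete HadoopDaemonRemoteCluster instance, and the localConfDir value are illustrative assumptions, not part of the patch.

import java.io.IOException;

import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;

// Hypothetical helper showing the expected rollout sequence for a pushed configuration.
public class PushConfigFlow {
  public static void restartWithNewConfig(HadoopDaemonRemoteCluster cluster,
      String localConfDir) throws IOException {
    // Copy the locally prepared conf files to every daemon host; the returned
    // path is the remote directory configured via CONF_HADOOPNEWCONFDIR.
    String newConfDir = cluster.pushConfig(localConfDir);
    cluster.stop();             // stop daemons running on the original HADOOP_CONF_DIR
    cluster.start(newConfDir);  // start them again from the pushed directory
    // ... run the test against the reconfigured cluster ...
    cluster.stop(newConfDir);   // stop the daemons started from the new directory
    cluster.start();            // restore the cluster on the original configuration
  }
}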
+ 96 - 0
src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java

@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to you under the Apache License, Version
+ * 2.0 (the "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+package org.apache.hadoop.test.system.process;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
+
+public abstract class MultiUserHadoopDaemonRemoteCluster
+    extends HadoopDaemonRemoteCluster {
+
+  public MultiUserHadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
+    super(daemonInfos);
+  }
+
+  @Override
+  protected RemoteProcess getProcessManager(
+      HadoopDaemonInfo info, String hostName) {
+    return new MultiUserScriptDaemon(info.cmd, hostName, info.role);
+  }
+
+  @Override
+  public boolean isMultiUserSupported() throws IOException {
+    return true;
+  }
+
+  class MultiUserScriptDaemon extends ScriptDaemon {
+
+    private static final String MULTI_USER_BINARY_PATH_KEY =
+        "test.system.hdrc.multi-user.binary.path";
+    private static final String MULTI_USER_MANAGING_USER =
+        "test.system.hdrc.multi-user.managinguser.";
+    private String binaryPath;
+    /**
+     * The managing user for a particular daemon is looked up under
+     * MULTI_USER_MANAGING_USER + daemonName.
+     */
+    private String managingUser;
+
+    public MultiUserScriptDaemon(
+        String daemonName, String hostName, Enum<?> role) {
+      super(daemonName, hostName, role);
+      initialize(daemonName);
+    }
+
+    private void initialize(String daemonName) {
+      binaryPath = conf.get(MULTI_USER_BINARY_PATH_KEY);
+      if (binaryPath == null || binaryPath.trim().isEmpty()) {
+        throw new IllegalArgumentException(
+            "Binary path for multi-user path is not present. Please set "
+                + MULTI_USER_BINARY_PATH_KEY + " correctly");
+      }
+      File binaryFile = new File(binaryPath);
+      if (!binaryFile.exists() || !binaryFile.canExecute()) {
+        throw new IllegalArgumentException(
+            "Binary file path is not configured correctly. Please set "
+                + MULTI_USER_BINARY_PATH_KEY
+                + " to properly configured binary file.");
+      }
+      managingUser = conf.get(MULTI_USER_MANAGING_USER + daemonName);
+      if (managingUser == null || managingUser.trim().isEmpty()) {
+        throw new IllegalArgumentException(
+            "Managing user for the daemon is not present. Please set "
+                + MULTI_USER_MANAGING_USER + daemonName + " to a correct value.");
+      }
+    }
+
+    @Override
+    protected String[] getCommand(String command, String confDir) {
+      ArrayList<String> commandList = new ArrayList<String>();
+      commandList.add(binaryPath);
+      commandList.add(managingUser);
+      commandList.add(hostName);
+      commandList.add("--config "
+          + confDir + " " + command + " " + daemonName);
+      return (String[]) commandList.toArray(new String[commandList.size()]);
+    }
+  }
+}

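A sketch of the configuration that MultiUserScriptDaemon.initialize() expects follows; the property names are taken from the class above, while the binary location, user names, and the wrapper class are placeholders.

import org.apache.hadoop.conf.Configuration;

// Hypothetical setup illustrating the two settings validated in initialize().
public class MultiUserSetup {
  public static Configuration multiUserConf() {
    Configuration conf = new Configuration();
    // Absolute path to the executable used to run the daemon script as another user.
    conf.set("test.system.hdrc.multi-user.binary.path", "/usr/local/bin/runAs");
    // Managing user per daemon: the key is the prefix plus the daemon command name.
    conf.set("test.system.hdrc.multi-user.managinguser.namenode", "hdfs");
    conf.set("test.system.hdrc.multi-user.managinguser.jobtracker", "mapred");
    return conf;
  }
}

The returned Configuration would then be passed to the cluster's init(conf), which constructs one MultiUserScriptDaemon per host and fails fast if either setting is missing.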
+ 23 - 3
src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.test.system.process;
 
 import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
 
 /**
  * Interface to manage the remote process.
@@ -37,18 +38,37 @@ public interface RemoteProcess {
    * @throws IOException if startup fails.
    */
   void start() throws IOException;
-
+  /**
+   * Starts a daemon from a user specified conf dir.
+   * @param newConfLocation directory where the new conf files reside.
+   * @throws IOException if startup fails.
+   */
+  void start(String newConfLocation) throws IOException;
   /**
    * Stop a given daemon process.<br/>
    * 
    * @throws IOException if shutdown fails.
    */
   void kill() throws IOException;
-
+  
+  /**
+   * Stops a given daemon running from a user specified
+   * conf dir.<br/>
+   * @param newConfLocation directory where the new conf files reside.
+   * @throws IOException if shutdown fails.
+   */
+  void kill(String newConfLocation) throws IOException;
   /**
    * Get the role of the Daemon in the cluster.
    * 
    * @return Enum
    */
   Enum<?> getRole();
-}
+  
+  /**
+   * Pushes the configuration to the new configuration directory.
+   * @param localDir local directory containing the new configuration files.
+   * @throws IOException if the push fails.
+   */
+  void pushConfig(String localDir) throws IOException;
+}

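Read together, the added methods describe a per-daemon protocol: push a prepared configuration, restart the daemon from it, and later stop it using the same directory. A minimal sketch follows; the class and method names introduced here and the directory values are illustrative assumptions.

import java.io.IOException;

import org.apache.hadoop.test.system.process.RemoteProcess;

// Hypothetical driver for a single daemon process.
public class RemoteProcessFlow {
  public static void cycle(RemoteProcess process, String localDir, String newConfDir)
      throws IOException {
    process.pushConfig(localDir);  // copy the prepared files to the daemon host
    process.kill();                // stop the daemon using the default conf dir
    process.start(newConfDir);     // start it with HADOOP_CONF_DIR pointing at newConfDir
    process.kill(newConfDir);      // later, stop the daemon started from the new conf dir
  }
}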
+ 48 - 0
src/test/system/scripts/pushConfig.sh

@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# local folder with the new configuration files
+LOCAL_DIR=$1
+# remote daemon host
+HOST=$2
+# remote dir points to the location of new config files
+REMOTE_DIR=$3
+# remote daemon HADOOP_CONF_DIR location
+DAEMON_HADOOP_CONF_DIR=$4
+
+if [ $# -ne 4 ]; then
+  echo "Wrong number of parameters" >&2
+  exit 2
+fi
+
+ret_value=0
+
+echo The script makes a remote copy of existing ${DAEMON_HADOOP_CONF_DIR} to ${REMOTE_DIR}
+echo and populates it with new configs prepared in $LOCAL_DIR
+
+ssh ${HOST} cp -r ${DAEMON_HADOOP_CONF_DIR}/* ${REMOTE_DIR}
+ret_value=$?
+
+# make sure files are writable
+ssh ${HOST} chmod u+w ${REMOTE_DIR}/*
+
+# copy new files over
+scp -r ${LOCAL_DIR}/* ${HOST}:${REMOTE_DIR}
+
+err_code=$(($? + ${ret_value}))
+echo Copying of files from local to remote returned ${err_code}
+exit ${err_code}
+

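For reference, here is a sketch of how the four positional arguments line up when the script is driven from Java, mirroring ScriptDaemon.getPushConfigCommand above; the class name and all paths are placeholders.

import java.io.File;
import java.io.IOException;

import org.apache.hadoop.util.Shell.ShellCommandExecutor;

// Hypothetical wrapper around pushConfig.sh.
public class PushConfigInvoker {
  public static void push(String scriptsDir, String localDir, String host,
      String remoteNewConfDir, String remoteHadoopConfDir) throws IOException {
    String[] cmd = new String[] {
        new File(scriptsDir, "pushConfig.sh").getAbsolutePath(),
        localDir,               // $1: local folder with the new configuration files
        host,                   // $2: remote daemon host
        remoteNewConfDir,       // $3: remote directory that receives the merged config
        remoteHadoopConfDir };  // $4: the daemon's existing HADOOP_CONF_DIR on that host
    ShellCommandExecutor executor =
        new ShellCommandExecutor(cmd, new File(scriptsDir));
    executor.execute();
  }
}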
+ 119 - 0
src/test/testjar/JobKillCommitter.java

@@ -0,0 +1,119 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package testjar;
+
+import java.io.IOException;
+import java.util.Iterator;
+
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.mapred.FileOutputCommitter;
+import org.apache.hadoop.mapred.JobContext;
+import org.apache.hadoop.mapreduce.Mapper;
+import org.apache.hadoop.mapreduce.Reducer;
+
+public class JobKillCommitter {
+  /**
+   * The class provides an overridden implementation of the output committer
+   * setup method, which causes the job to fail during setup.
+   */
+  public static class CommitterWithFailSetup extends FileOutputCommitter {
+    @Override
+    public void setupJob(JobContext context) throws IOException {
+      throw new IOException();
+    }
+  }
+
+  /**
+   * The class provides a dummy implementation of the output committer
+   * which does nothing.
+   */
+  public static class CommitterWithNoError extends FileOutputCommitter {
+    @Override
+    public void setupJob(JobContext context) throws IOException {
+    }
+
+    @Override
+    public void commitJob(JobContext context) throws IOException {
+    }
+  }
+
+  /**
+   * The class provides an overridden implementation of commitJob which
+   * causes the cleanup phase to fail.
+   */
+  public static class CommitterWithFailCleanup extends FileOutputCommitter {
+    @Override
+    public void commitJob(JobContext context) throws IOException {
+      throw new IOException();
+    }
+  }
+
+  /**
+   * The class provides a dummy implementation of the map method which
+   * does nothing.
+   */
+  public static class MapperPass extends Mapper<LongWritable, Text, Text, Text> {
+    public void map(LongWritable key, Text value, Context context)
+        throws IOException, InterruptedException {
+    }
+  }
+  /**
+   * The class provides a sleeping implementation of the map method.
+   */
+  public static class MapperPassSleep extends
+      Mapper<LongWritable, Text, Text, Text> {
+    public void map(LongWritable key, Text value, Context context)
+        throws IOException, InterruptedException {
+      Thread.sleep(10000);
+    }
+  }
+
+  /**
+   * The class provides a way for the mapper function to fail by
+   * intentionally throwing an IOException.
+   */
+  public static class MapperFail extends Mapper<LongWritable, Text, Text, Text> {
+    public void map(LongWritable key, Text value, Context context)
+        throws IOException, InterruptedException {
+      throw new IOException();
+    }
+  }
+
+  /**
+   * The class provides a way for the reduce function to fail by
+   * intentionally throwing an IOException.
+   */
+  public static class ReducerFail extends Reducer<Text, Text, Text, Text> {
+    @Override
+    public void reduce(Text key, Iterable<Text> values, Context context)
+        throws IOException, InterruptedException {
+      throw new IOException();
+    }
+  }
+
+  /**
+   * The class provides an empty implementation of the reduce method that
+   * does nothing.
+   */
+  public static class ReducerPass extends Reducer<Text, Text, Text, Text> {
+    @Override
+    public void reduce(Text key, Iterable<Text> values, Context context)
+        throws IOException, InterruptedException {
+    }
+  }
+}
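
A hedged sketch of how these helper classes might be wired into a job for a job-kill or failure system test; the job name, input/output paths, and the wrapper class are assumptions, and the committer classes would additionally need to be plugged in through the job's output committer configuration.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

// Hypothetical job setup whose map tasks always fail.
public class JobKillExample {
  public static Job failingMapJob(Configuration conf, Path in, Path out)
      throws IOException {
    Job job = new Job(conf, "failing-map-job");
    job.setJarByClass(testjar.JobKillCommitter.class);
    job.setMapperClass(testjar.JobKillCommitter.MapperFail.class);   // every map attempt throws IOException
    job.setReducerClass(testjar.JobKillCommitter.ReducerPass.class); // reduce phase is a no-op
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    FileInputFormat.addInputPath(job, in);
    FileOutputFormat.setOutputPath(job, out);
    return job;
  }
}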