
svn merge -c 1345794 FIXES: HADOOP-8450. Remove src/test/system. (eli)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1356896 13f79535-47bb-0310-9956-ffa450edef68
Robert Joseph Evans, 13 years ago
Commit 7a0b44ecdd
100 files changed, 2 additions and 15466 deletions
  1. + 2 - 0  hadoop-common-project/hadoop-common/CHANGES.txt
  2. + 0 - 400  hadoop-common-project/hadoop-common/src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj
  3. + 0 - 41  hadoop-common-project/hadoop-common/src/test/system/c++/runAs/Makefile.in
  4. + 0 - 5117  hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure
  5. + 0 - 65  hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure.ac
  6. + 0 - 59  hadoop-common-project/hadoop-common/src/test/system/c++/runAs/main.c
  7. + 0 - 111  hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.c
  8. + 0 - 59  hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.h.in
  9. + 0 - 68  hadoop-common-project/hadoop-common/src/test/system/conf/hadoop-policy-system-test.xml
  10. + 0 - 599  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java
  11. + 0 - 537  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java
  12. + 0 - 86  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java
  13. + 0 - 204  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java
  14. + 0 - 77  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java
  15. + 0 - 159  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java
  16. + 0 - 90  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProxyUserDefinitions.java
  17. + 0 - 99  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java
  18. + 0 - 404  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java
  19. + 0 - 96  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java
  20. + 0 - 74  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java
  21. + 0 - 27  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java
  22. + 0 - 203  hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/SSHRemoteExecution.java
  23. + 0 - 48  hadoop-common-project/hadoop-common/src/test/system/scripts/pushConfig.sh
  24. + 0 - 50  hadoop-common-project/hadoop-common/src/test/system/validation/org/apache/hadoop/util/TestSSHRemoteExecution.java
  25. + 0 - 63  hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/HDFSPolicyProviderAspect.aj
  26. + 0 - 70  hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/datanode/DataNodeAspect.aj
  27. + 0 - 77  hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/namenode/NameNodeAspect.aj
  28. + 0 - 147  hadoop-hdfs-project/hadoop-hdfs/src/test/system/conf/system-test-hdfs.xml
  29. + 0 - 99  hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNClient.java
  30. + 0 - 36  hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNProtocol.java
  31. + 0 - 149  hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSCluster.java
  32. + 0 - 46  hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java
  33. + 0 - 88  hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNClient.java
  34. + 0 - 36  hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNProtocol.java
  35. + 0 - 86  hadoop-hdfs-project/hadoop-hdfs/src/test/system/test/org/apache/hadoop/hdfs/TestHL040.java
  36. + 0 - 231  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java
  37. + 0 - 108  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithCustomInterval.java
  38. + 0 - 105  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithDefaultInterval.java
  39. + 0 - 105  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithCustomInterval.java
  40. + 0 - 103  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithDefaultInterval.java
  41. + 0 - 96  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java
  42. + 0 - 98  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java
  43. + 0 - 93  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java
  44. + 0 - 65  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java
  45. + 0 - 95  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java
  46. + 0 - 91  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java
  47. + 0 - 92  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java
  48. + 0 - 64  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java
  49. + 0 - 93  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java
  50. + 0 - 229  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java
  51. + 0 - 128  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java
  52. + 0 - 173  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java
  53. + 0 - 102  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java
  54. + 0 - 89  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java
  55. + 0 - 91  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java
  56. + 0 - 64  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java
  57. + 0 - 67  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java
  58. + 0 - 64  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java
  59. + 0 - 91  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java
  60. + 0 - 92  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java
  61. + 0 - 67  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java
  62. + 0 - 62  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java
  63. + 0 - 59  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java
  64. + 0 - 64  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java
  65. + 0 - 68  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java
  66. + 0 - 62  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java
  67. + 0 - 65  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java
  68. + 0 - 62  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java
  69. + 0 - 62  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java
  70. + 0 - 106  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithCustomIntrvl.java
  71. + 0 - 106  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithDefaultIntrvl.java
  72. + 0 - 108  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomHeapMemoryRatio.java
  73. + 0 - 106  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomIntrvl.java
  74. + 0 - 104  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithDefaultIntrvl.java
  75. + 0 - 285  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java
  76. + 0 - 34  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java
  77. + 0 - 86  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java
  78. + 0 - 82  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java
  79. + 0 - 1166  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java
  80. + 0 - 513  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java
  81. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz
  82. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz
  83. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz
  84. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz
  85. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz
  86. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz
  87. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz
  88. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case1.json.gz
  89. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case2.json.gz
  90. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz
  91. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz
  92. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz
  93. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz
  94. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz
  95. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz
  96. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz
  97. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz
  98. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz
  99. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz
  100. BIN  hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz

+ 2 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -18,6 +18,8 @@ Release 0.23.3 - UNRELEASED
 
     HADOOP-8535. Cut hadoop build times in half (Jonathan Eagles via bobby)
 
+    HADOOP-8450. Remove src/test/system. (eli)
+
   OPTIMIZATIONS
 
   BUG FIXES

+ 0 - 400
hadoop-common-project/hadoop-common/src/test/system/aop/org/apache/hadoop/test/system/DaemonProtocolAspect.aj

@@ -1,400 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.File;
-import java.io.IOException;
-import java.security.PrivilegedExceptionAction;
-import java.util.HashMap;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Map;
-import java.util.Properties;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-import org.apache.hadoop.util.Shell;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Default DaemonProtocolAspect which is used to provide default implementation
- * for all the common daemon methods. If a daemon requires more specialized
- * version of method, it is responsibility of the DaemonClient to introduce the
- * same in woven classes.
- * 
- */
-public aspect DaemonProtocolAspect {
-
-  private boolean DaemonProtocol.ready;
-  
-  @SuppressWarnings("unchecked")
-  private HashMap<Object, List<ControlAction>> DaemonProtocol.actions = 
-    new HashMap<Object, List<ControlAction>>();
-  private static final Log LOG = LogFactory.getLog(
-      DaemonProtocolAspect.class.getName());
-
-  private static FsPermission defaultPermission = new FsPermission(
-     FsAction.READ_WRITE, FsAction.READ_WRITE, FsAction.READ_WRITE);
-
-  /**
-   * Set if the daemon process is ready or not, concrete daemon protocol should
-   * implement pointcuts to determine when the daemon is ready and use the
-   * setter to set the ready state.
-   * 
-   * @param ready
-   *          true if the Daemon is ready.
-   */
-  public void DaemonProtocol.setReady(boolean ready) {
-    this.ready = ready;
-  }
-
-  /**
-   * Checks if the daemon process is alive or not.
-   * 
-   * @throws IOException
-   *           if daemon is not alive.
-   */
-  public void DaemonProtocol.ping() throws IOException {
-  }
-
-  /**
-   * Checks if the daemon process is ready to accepting RPC connections after it
-   * finishes initialization. <br/>
-   * 
-   * @return true if ready to accept connection.
-   * 
-   * @throws IOException
-   */
-  public boolean DaemonProtocol.isReady() throws IOException {
-    return ready;
-  }
-
-  /**
-   * Returns the process related information regarding the daemon process. <br/>
-   * 
-   * @return process information.
-   * @throws IOException
-   */
-  public ProcessInfo DaemonProtocol.getProcessInfo() throws IOException {
-    int activeThreadCount = Thread.activeCount();
-    long currentTime = System.currentTimeMillis();
-    long maxmem = Runtime.getRuntime().maxMemory();
-    long freemem = Runtime.getRuntime().freeMemory();
-    long totalmem = Runtime.getRuntime().totalMemory();
-    Map<String, String> envMap = System.getenv();
-    Properties sysProps = System.getProperties();
-    Map<String, String> props = new HashMap<String, String>();
-    for (Map.Entry entry : sysProps.entrySet()) {
-      props.put((String) entry.getKey(), (String) entry.getValue());
-    }
-    ProcessInfo info = new ProcessInfoImpl(activeThreadCount, currentTime,
-        freemem, maxmem, totalmem, envMap, props);
-    return info;
-  }
-
-  public void DaemonProtocol.enable(List<Enum<?>> faults) throws IOException {
-  }
-
-  public void DaemonProtocol.disableAll() throws IOException {
-  }
-
-  public abstract Configuration DaemonProtocol.getDaemonConf()
-    throws IOException;
-
-  public FileStatus DaemonProtocol.getFileStatus(String path, boolean local) 
-    throws IOException {
-    Path p = new Path(path);
-    FileSystem fs = getFS(p, local);
-    p.makeQualified(fs);
-    FileStatus fileStatus = fs.getFileStatus(p);
-    return cloneFileStatus(fileStatus);
-  }
-  
-  /**
-   * Create a file with given permissions in a file system.
-   * @param path - source path where the file has to create.
-   * @param fileName - file name.
-   * @param permission - file permissions.
-   * @param local - identifying the path whether its local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void DaemonProtocol.createFile(String path, String fileName, 
-     FsPermission permission, boolean local) throws IOException {
-    Path p = new Path(path); 
-    FileSystem fs = getFS(p, local);
-    Path filePath = new Path(path, fileName);
-    fs.create(filePath);
-    if (permission == null) {
-      fs.setPermission(filePath, defaultPermission);
-    } else {
-      fs.setPermission(filePath, permission);
-    }
-    fs.close();
-  }
-
-  /**
-   * Create a folder with given permissions in a file system.
-   * @param path - source path where the file has to be creating.
-   * @param folderName - folder name.
-   * @param permission - folder permissions.
-   * @param local - identifying the path whether its local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void DaemonProtocol.createFolder(String path, String folderName, 
-     FsPermission permission, boolean local) throws IOException {
-    Path p = new Path(path);
-    FileSystem fs = getFS(p, local);
-    Path folderPath = new Path(path, folderName);
-    fs.mkdirs(folderPath);
-    if (permission == null) {
-      fs.setPermission(folderPath, defaultPermission);
-    } else {
-      fs.setPermission(folderPath, permission);
-    }
-    fs.close();
-  }
-
-  public FileStatus[] DaemonProtocol.listStatus(String path, boolean local) 
-    throws IOException {
-    Path p = new Path(path);
-    FileSystem fs = getFS(p, local);
-    FileStatus[] status = fs.listStatus(p);
-    if (status != null) {
-      FileStatus[] result = new FileStatus[status.length];
-      int i = 0;
-      for (FileStatus fileStatus : status) {
-        result[i++] = cloneFileStatus(fileStatus);
-      }
-      return result;
-    }
-    return status;
-  }
-
-  /**
-   * FileStatus object may not be serializable. Clone it into raw FileStatus 
-   * object.
-   */
-  private FileStatus DaemonProtocol.cloneFileStatus(FileStatus fileStatus) {
-    return new FileStatus(fileStatus.getLen(),
-        fileStatus.isDir(),
-        fileStatus.getReplication(),
-        fileStatus.getBlockSize(),
-        fileStatus.getModificationTime(),
-        fileStatus.getAccessTime(),
-        fileStatus.getPermission(),
-        fileStatus.getOwner(),
-        fileStatus.getGroup(),
-        fileStatus.getPath());
-  }
-
-  private FileSystem DaemonProtocol.getFS(final Path path, final boolean local)
-      throws IOException {
-    FileSystem ret = null;
-    try {
-      ret = UserGroupInformation.getLoginUser().doAs (
-          new PrivilegedExceptionAction<FileSystem>() {
-            public FileSystem run() throws IOException {
-              FileSystem fs = null;
-              if (local) {
-                fs = FileSystem.getLocal(getDaemonConf());
-              } else {
-                fs = path.getFileSystem(getDaemonConf());
-              }
-              return fs;
-            }
-          });
-    } catch (InterruptedException ie) {
-    }
-    return ret;
-  }
-  
-  @SuppressWarnings("unchecked")
-  public ControlAction[] DaemonProtocol.getActions(Writable key) 
-    throws IOException {
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(key);
-      if(actionList == null) {
-        return new ControlAction[0];
-      } else {
-        return (ControlAction[]) actionList.toArray(new ControlAction[actionList
-                                                                      .size()]);
-      }
-    }
-  }
-
-
-  @SuppressWarnings("unchecked")
-  public void DaemonProtocol.sendAction(ControlAction action) 
-      throws IOException {
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(action.getTarget());
-      if(actionList == null) {
-        actionList = new ArrayList<ControlAction>();
-        actions.put(action.getTarget(), actionList);
-      }
-      actionList.add(action);
-    } 
-  }
- 
-  @SuppressWarnings("unchecked")
-  public boolean DaemonProtocol.isActionPending(ControlAction action) 
-    throws IOException{
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(action.getTarget());
-      if(actionList == null) {
-        return false;
-      } else {
-        return actionList.contains(action);
-      }
-    }
-  }
-  
-  
-  @SuppressWarnings("unchecked")
-  public void DaemonProtocol.removeAction(ControlAction action) 
-    throws IOException {
-    synchronized (actions) {
-      List<ControlAction> actionList = actions.get(action.getTarget());
-      if(actionList == null) {
-        return;
-      } else {
-        actionList.remove(action);
-      }
-    }
-  }
-  
-  public void DaemonProtocol.clearActions() throws IOException {
-    synchronized (actions) {
-      actions.clear();
-    }
-  }
-
-  public String DaemonProtocol.getFilePattern() {
-    //We use the environment variable HADOOP_LOGFILE to get the
-    //pattern to use in the search.
-    String logDir = System.getProperty("hadoop.log.dir");
-    String daemonLogPattern = System.getProperty("hadoop.log.file");
-    if(daemonLogPattern == null && daemonLogPattern.isEmpty()) {
-      return "*";
-    }
-    return  logDir+File.separator+daemonLogPattern+"*";
-  }
-
-  public int DaemonProtocol.getNumberOfMatchesInLogFile(String pattern,
-      String[] list) throws IOException {
-    StringBuffer filePattern = new StringBuffer(getFilePattern());    
-    String[] cmd = null;
-    if (list != null) {
-      StringBuffer filterExpPattern = new StringBuffer();
-      int index=0;
-      for (String excludeExp : list) {
-        if (index++ < list.length -1) {
-           filterExpPattern.append("grep -v " + excludeExp + " | ");
-        } else {
-           filterExpPattern.append("grep -v " + excludeExp + " | wc -l");
-        }
-      }
-      cmd = new String[] {
-                "bash",
-                "-c",
-                "grep "
-                + pattern + " " + filePattern + " | "
-                + filterExpPattern};
-    } else {
-      cmd = new String[] {
-                "bash",
-                "-c",
-                "grep -c "
-                + pattern + " " + filePattern
-                + " | awk -F: '{s+=$2} END {print s}'" };    
-    }
-    ShellCommandExecutor shexec = new ShellCommandExecutor(cmd);
-    shexec.execute();
-    String output = shexec.getOutput();
-    return Integer.parseInt(output.replaceAll("\n", "").trim());
-  }
-
-  /**
-   * This method is used for suspending the process.
-   * @param pid process id
-   * @throws IOException if an I/O error occurs.
-   * @return true if process is suspended otherwise false.
-   */
-  public boolean DaemonProtocol.suspendProcess(String pid) throws IOException {
-    String suspendCmd = getDaemonConf().get("test.system.hdrc.suspend.cmd",
-        "kill -SIGSTOP");
-    String [] command = {"bash", "-c", suspendCmd + " " + pid};
-    ShellCommandExecutor shexec = new ShellCommandExecutor(command);
-    try {
-      shexec.execute();
-    } catch (Shell.ExitCodeException e) {
-      LOG.warn("suspended process throws an exitcode "
-          + "exception for not being suspended the given process id.");
-      return false;
-    }
-    LOG.info("The suspend process command is :"
-        + shexec.toString()
-        + " and the output for the command is "
-        + shexec.getOutput());
-    return true;
-  }
-
-  /**
-   * This method is used for resuming the process
-   * @param pid process id of suspended process.
-   * @throws IOException if an I/O error occurs.
-   * @return true if suspeneded process is resumed otherwise false.
-   */
-  public boolean DaemonProtocol.resumeProcess(String pid) throws IOException {
-    String resumeCmd = getDaemonConf().get("test.system.hdrc.resume.cmd",
-        "kill -SIGCONT");
-    String [] command = {"bash", "-c", resumeCmd + " " + pid};
-    ShellCommandExecutor shexec = new ShellCommandExecutor(command);
-    try {
-      shexec.execute();
-    } catch(Shell.ExitCodeException e) {
-        LOG.warn("Resume process throws an exitcode "
-          + "exception for not being resumed the given process id.");
-      return false;
-    }
-    LOG.info("The resume process command is :"
-        + shexec.toString()
-        + " and the output for the command is "
-        + shexec.getOutput());
-    return true;
-  }
-
-  private String DaemonProtocol.user = null;
-  
-  public String DaemonProtocol.getDaemonUser() {
-    return user;
-  }
-  
-  public void DaemonProtocol.setUser(String user) {
-    this.user = user;
-  }
-}
-
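
The DaemonProtocol.* members in the hunk above are AspectJ inter-type declarations: when the system tests were built, the aspect wove these fields and default method bodies into the DaemonProtocol type, so every daemon picked up a common implementation without the interface itself carrying one. For readers unfamiliar with the weaving syntax, here is a minimal plain-Java sketch of the action registry the aspect declared; the ActionRegistry class and its generic signature are illustrative only, not Hadoop API:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Hypothetical stand-in for the per-target action map the aspect wove in.
    class ActionRegistry<K, A> {
      private final Map<K, List<A>> actions = new HashMap<K, List<A>>();

      // Counterpart of sendAction: queue an action for a target key.
      synchronized void send(K target, A action) {
        List<A> list = actions.get(target);
        if (list == null) {
          list = new ArrayList<A>();
          actions.put(target, list);
        }
        list.add(action);
      }

      // Counterpart of isActionPending.
      synchronized boolean isPending(K target, A action) {
        List<A> list = actions.get(target);
        return list != null && list.contains(action);
      }

      // Counterpart of removeAction.
      synchronized void remove(K target, A action) {
        List<A> list = actions.get(target);
        if (list != null) {
          list.remove(action);
        }
      }

      // Counterpart of clearActions.
      synchronized void clear() {
        actions.clear();
      }
    }

The aspect synchronized on the map itself; synchronizing the registry's methods is the equivalent move once the state lives in a standalone class.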
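Likewise, suspendProcess and resumeProcess above reduce to kill -SIGSTOP / kill -SIGCONT run through ShellCommandExecutor, with the exact commands configurable via test.system.hdrc.suspend.cmd and test.system.hdrc.resume.cmd. A self-contained JDK-only sketch of the same idea; ProcessControl is an illustrative name, not Hadoop API:

    import java.io.IOException;

    class ProcessControl {
      // Pause the process with the given pid; SIGSTOP cannot be caught or ignored.
      static boolean suspend(String pid) throws IOException, InterruptedException {
        return signal("-SIGSTOP", pid);
      }

      // Resume a process previously stopped with SIGSTOP.
      static boolean resume(String pid) throws IOException, InterruptedException {
        return signal("-SIGCONT", pid);
      }

      private static boolean signal(String sig, String pid)
          throws IOException, InterruptedException {
        Process p = new ProcessBuilder("kill", sig, pid).inheritIO().start();
        return p.waitFor() == 0; // non-zero exit: signal was not delivered
      }
    }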

+ 0 - 41
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/Makefile.in

@@ -1,41 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-OBJS=main.o runAs.o
-CC=@CC@
-CFLAGS = @CFLAGS@
-BINARY=runAs
-installdir = @prefix@
-
-all: $(OBJS)
-	$(CC) $(CFLAG) -o $(BINARY) $(OBJS)
-
-main.o: runAs.o main.c
-	$(CC) $(CFLAG) -o main.o -c main.c
-
-runAs.o: runAs.h runAs.c
-	$(CC) $(CFLAG) -o runAs.o -c runAs.c
-
-clean:
-	rm -rf $(BINARY) $(OBJS) $(TESTOBJS)
-
-install: all
-	cp $(BINARY) $(installdir)
-
-uninstall:
-	rm -rf $(installdir)/$(BINARY)
-	rm -rf $(BINARY)

+ 0 - 5117
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure

@@ -1,5117 +0,0 @@
-#! /bin/sh
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.65 for runAs 0.1.
-#
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001,
-# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
-# Inc.
-#
-# This configure script is free software; the Free Software Foundation
-# gives unlimited permission to copy, distribute and modify it.
-## -------------------- ##
-## M4sh Initialization. ##
-## -------------------- ##
-
-# Be more Bourne compatible
-DUALCASE=1; export DUALCASE # for MKS sh
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
-  emulate sh
-  NULLCMD=:
-  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '${1+"$@"}'='"$@"'
-  setopt NO_GLOB_SUBST
-else
-  case `(set -o) 2>/dev/null` in #(
-  *posix*) :
-    set -o posix ;; #(
-  *) :
-     ;;
-esac
-fi
-
-
-as_nl='
-'
-export as_nl
-# Printing a long string crashes Solaris 7 /usr/bin/printf.
-as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
-# Prefer a ksh shell builtin over an external printf program on Solaris,
-# but without wasting forks for bash or zsh.
-if test -z "$BASH_VERSION$ZSH_VERSION" \
-    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='print -r --'
-  as_echo_n='print -rn --'
-elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='printf %s\n'
-  as_echo_n='printf %s'
-else
-  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
-    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
-    as_echo_n='/usr/ucb/echo -n'
-  else
-    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
-    as_echo_n_body='eval
-      arg=$1;
-      case $arg in #(
-      *"$as_nl"*)
-	expr "X$arg" : "X\\(.*\\)$as_nl";
-	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
-      esac;
-      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
-    '
-    export as_echo_n_body
-    as_echo_n='sh -c $as_echo_n_body as_echo'
-  fi
-  export as_echo_body
-  as_echo='sh -c $as_echo_body as_echo'
-fi
-
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
-  PATH_SEPARATOR=:
-  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
-    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
-      PATH_SEPARATOR=';'
-  }
-fi
-
-
-# IFS
-# We need space, tab and new line, in precisely that order.  Quoting is
-# there to prevent editors from complaining about space-tab.
-# (If _AS_PATH_WALK were called with IFS unset, it would disable word
-# splitting by setting IFS to empty value.)
-IFS=" ""	$as_nl"
-
-# Find who we are.  Look in the path if we contain no directory separator.
-case $0 in #((
-  *[\\/]* ) as_myself=$0 ;;
-  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-  done
-IFS=$as_save_IFS
-
-     ;;
-esac
-# We did not find ourselves, most probably we were run as `sh COMMAND'
-# in which case we are not to be found in the path.
-if test "x$as_myself" = x; then
-  as_myself=$0
-fi
-if test ! -f "$as_myself"; then
-  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
-  exit 1
-fi
-
-# Unset variables that we do not need and which cause bugs (e.g. in
-# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
-# suppresses any "Segmentation fault" message there.  '((' could
-# trigger a bug in pdksh 5.2.14.
-for as_var in BASH_ENV ENV MAIL MAILPATH
-do eval test x\${$as_var+set} = xset \
-  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
-done
-PS1='$ '
-PS2='> '
-PS4='+ '
-
-# NLS nuisances.
-LC_ALL=C
-export LC_ALL
-LANGUAGE=C
-export LANGUAGE
-
-# CDPATH.
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
-
-if test "x$CONFIG_SHELL" = x; then
-  as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
-  emulate sh
-  NULLCMD=:
-  # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '\${1+\"\$@\"}'='\"\$@\"'
-  setopt NO_GLOB_SUBST
-else
-  case \`(set -o) 2>/dev/null\` in #(
-  *posix*) :
-    set -o posix ;; #(
-  *) :
-     ;;
-esac
-fi
-"
-  as_required="as_fn_return () { (exit \$1); }
-as_fn_success () { as_fn_return 0; }
-as_fn_failure () { as_fn_return 1; }
-as_fn_ret_success () { return 0; }
-as_fn_ret_failure () { return 1; }
-
-exitcode=0
-as_fn_success || { exitcode=1; echo as_fn_success failed.; }
-as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
-as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
-as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
-if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
-
-else
-  exitcode=1; echo positional parameters were not saved.
-fi
-test x\$exitcode = x0 || exit 1"
-  as_suggested="  as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
-  as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
-  eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
-  test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
-test \$(( 1 + 1 )) = 2 || exit 1"
-  if (eval "$as_required") 2>/dev/null; then :
-  as_have_required=yes
-else
-  as_have_required=no
-fi
-  if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
-
-else
-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-as_found=false
-for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-  as_found=:
-  case $as_dir in #(
-	 /*)
-	   for as_base in sh bash ksh sh5; do
-	     # Try only shells that exist, to save several forks.
-	     as_shell=$as_dir/$as_base
-	     if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
-		    { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
-  CONFIG_SHELL=$as_shell as_have_required=yes
-		   if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
-  break 2
-fi
-fi
-	   done;;
-       esac
-  as_found=false
-done
-$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
-	      { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
-  CONFIG_SHELL=$SHELL as_have_required=yes
-fi; }
-IFS=$as_save_IFS
-
-
-      if test "x$CONFIG_SHELL" != x; then :
-  # We cannot yet assume a decent shell, so we have to provide a
-	# neutralization value for shells without unset; and this also
-	# works around shells that cannot unset nonexistent variables.
-	BASH_ENV=/dev/null
-	ENV=/dev/null
-	(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
-	export CONFIG_SHELL
-	exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"}
-fi
-
-    if test x$as_have_required = xno; then :
-  $as_echo "$0: This script requires a shell more modern than all"
-  $as_echo "$0: the shells that I found on your system."
-  if test x${ZSH_VERSION+set} = xset ; then
-    $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
-    $as_echo "$0: be upgraded to zsh 4.3.4 or later."
-  else
-    $as_echo "$0: Please tell bug-autoconf@gnu.org about your system,
-$0: including any error possibly output before this
-$0: message. Then install a modern shell, or manually run
-$0: the script under such a shell if you do have one."
-  fi
-  exit 1
-fi
-fi
-fi
-SHELL=${CONFIG_SHELL-/bin/sh}
-export SHELL
-# Unset more variables known to interfere with behavior of common tools.
-CLICOLOR_FORCE= GREP_OPTIONS=
-unset CLICOLOR_FORCE GREP_OPTIONS
-
-## --------------------- ##
-## M4sh Shell Functions. ##
-## --------------------- ##
-# as_fn_unset VAR
-# ---------------
-# Portably unset VAR.
-as_fn_unset ()
-{
-  { eval $1=; unset $1;}
-}
-as_unset=as_fn_unset
-
-# as_fn_set_status STATUS
-# -----------------------
-# Set $? to STATUS, without forking.
-as_fn_set_status ()
-{
-  return $1
-} # as_fn_set_status
-
-# as_fn_exit STATUS
-# -----------------
-# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
-as_fn_exit ()
-{
-  set +e
-  as_fn_set_status $1
-  exit $1
-} # as_fn_exit
-
-# as_fn_mkdir_p
-# -------------
-# Create "$as_dir" as a directory, including parents if necessary.
-as_fn_mkdir_p ()
-{
-
-  case $as_dir in #(
-  -*) as_dir=./$as_dir;;
-  esac
-  test -d "$as_dir" || eval $as_mkdir_p || {
-    as_dirs=
-    while :; do
-      case $as_dir in #(
-      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
-      *) as_qdir=$as_dir;;
-      esac
-      as_dirs="'$as_qdir' $as_dirs"
-      as_dir=`$as_dirname -- "$as_dir" ||
-$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$as_dir" : 'X\(//\)[^/]' \| \
-	 X"$as_dir" : 'X\(//\)$' \| \
-	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$as_dir" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-      test -d "$as_dir" && break
-    done
-    test -z "$as_dirs" || eval "mkdir $as_dirs"
-  } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
-
-
-} # as_fn_mkdir_p
-# as_fn_append VAR VALUE
-# ----------------------
-# Append the text in VALUE to the end of the definition contained in VAR. Take
-# advantage of any shell optimizations that allow amortized linear growth over
-# repeated appends, instead of the typical quadratic growth present in naive
-# implementations.
-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
-  eval 'as_fn_append ()
-  {
-    eval $1+=\$2
-  }'
-else
-  as_fn_append ()
-  {
-    eval $1=\$$1\$2
-  }
-fi # as_fn_append
-
-# as_fn_arith ARG...
-# ------------------
-# Perform arithmetic evaluation on the ARGs, and store the result in the
-# global $as_val. Take advantage of shells that can avoid forks. The arguments
-# must be portable across $(()) and expr.
-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
-  eval 'as_fn_arith ()
-  {
-    as_val=$(( $* ))
-  }'
-else
-  as_fn_arith ()
-  {
-    as_val=`expr "$@" || test $? -eq 1`
-  }
-fi # as_fn_arith
-
-
-# as_fn_error ERROR [LINENO LOG_FD]
-# ---------------------------------
-# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
-# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
-# script with status $?, using 1 if that was 0.
-as_fn_error ()
-{
-  as_status=$?; test $as_status -eq 0 && as_status=1
-  if test "$3"; then
-    as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-    $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
-  fi
-  $as_echo "$as_me: error: $1" >&2
-  as_fn_exit $as_status
-} # as_fn_error
-
-if expr a : '\(a\)' >/dev/null 2>&1 &&
-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
-  as_expr=expr
-else
-  as_expr=false
-fi
-
-if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
-  as_basename=basename
-else
-  as_basename=false
-fi
-
-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
-  as_dirname=dirname
-else
-  as_dirname=false
-fi
-
-as_me=`$as_basename -- "$0" ||
-$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
-	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X/"$0" |
-    sed '/^.*\/\([^/][^/]*\)\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-
-# Avoid depending upon Character Ranges.
-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
-as_cr_digits='0123456789'
-as_cr_alnum=$as_cr_Letters$as_cr_digits
-
-
-  as_lineno_1=$LINENO as_lineno_1a=$LINENO
-  as_lineno_2=$LINENO as_lineno_2a=$LINENO
-  eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
-  test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
-  # Blame Lee E. McMahon (1931-1989) for sed's syntax.  :-)
-  sed -n '
-    p
-    /[$]LINENO/=
-  ' <$as_myself |
-    sed '
-      s/[$]LINENO.*/&-/
-      t lineno
-      b
-      :lineno
-      N
-      :loop
-      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
-      t loop
-      s/-\n.*//
-    ' >$as_me.lineno &&
-  chmod +x "$as_me.lineno" ||
-    { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
-
-  # Don't try to exec as it changes $[0], causing all sort of problems
-  # (the dirname of $[0] is not the place where we might find the
-  # original and so on.  Autoconf is especially sensitive to this).
-  . "./$as_me.lineno"
-  # Exit status is that of the last command.
-  exit
-}
-
-ECHO_C= ECHO_N= ECHO_T=
-case `echo -n x` in #(((((
--n*)
-  case `echo 'xy\c'` in
-  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
-  xy)  ECHO_C='\c';;
-  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
-       ECHO_T='	';;
-  esac;;
-*)
-  ECHO_N='-n';;
-esac
-
-rm -f conf$$ conf$$.exe conf$$.file
-if test -d conf$$.dir; then
-  rm -f conf$$.dir/conf$$.file
-else
-  rm -f conf$$.dir
-  mkdir conf$$.dir 2>/dev/null
-fi
-if (echo >conf$$.file) 2>/dev/null; then
-  if ln -s conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s='ln -s'
-    # ... but there are two gotchas:
-    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
-    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
-    # In both cases, we have to default to `cp -p'.
-    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
-      as_ln_s='cp -p'
-  elif ln conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s=ln
-  else
-    as_ln_s='cp -p'
-  fi
-else
-  as_ln_s='cp -p'
-fi
-rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
-rmdir conf$$.dir 2>/dev/null
-
-if mkdir -p . 2>/dev/null; then
-  as_mkdir_p='mkdir -p "$as_dir"'
-else
-  test -d ./-p && rmdir ./-p
-  as_mkdir_p=false
-fi
-
-if test -x / >/dev/null 2>&1; then
-  as_test_x='test -x'
-else
-  if ls -dL / >/dev/null 2>&1; then
-    as_ls_L_option=L
-  else
-    as_ls_L_option=
-  fi
-  as_test_x='
-    eval sh -c '\''
-      if test -d "$1"; then
-	test -d "$1/.";
-      else
-	case $1 in #(
-	-*)set "./$1";;
-	esac;
-	case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
-	???[sx]*):;;*)false;;esac;fi
-    '\'' sh
-  '
-fi
-as_executable_p=$as_test_x
-
-# Sed expression to map a string onto a valid CPP name.
-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
-
-# Sed expression to map a string onto a valid variable name.
-as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
-
-
-test -n "$DJDIR" || exec 7<&0 </dev/null
-exec 6>&1
-
-# Name of the host.
-# hostname on some systems (SVR3.2, Linux) returns a bogus exit status,
-# so uname gets run too.
-ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
-
-#
-# Initializations.
-#
-ac_default_prefix=/usr/local
-ac_clean_files=
-ac_config_libobj_dir=.
-LIBOBJS=
-cross_compiling=no
-subdirs=
-MFLAGS=
-MAKEFLAGS=
-
-# Identity of this package.
-PACKAGE_NAME='runAs'
-PACKAGE_TARNAME='runas'
-PACKAGE_VERSION='0.1'
-PACKAGE_STRING='runAs 0.1'
-PACKAGE_BUGREPORT=''
-PACKAGE_URL=''
-
-ac_default_prefix=.
-ac_unique_file="main.c"
-# Factoring default headers for most tests.
-ac_includes_default="\
-#include <stdio.h>
-#ifdef HAVE_SYS_TYPES_H
-# include <sys/types.h>
-#endif
-#ifdef HAVE_SYS_STAT_H
-# include <sys/stat.h>
-#endif
-#ifdef STDC_HEADERS
-# include <stdlib.h>
-# include <stddef.h>
-#else
-# ifdef HAVE_STDLIB_H
-#  include <stdlib.h>
-# endif
-#endif
-#ifdef HAVE_STRING_H
-# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
-#  include <memory.h>
-# endif
-# include <string.h>
-#endif
-#ifdef HAVE_STRINGS_H
-# include <strings.h>
-#endif
-#ifdef HAVE_INTTYPES_H
-# include <inttypes.h>
-#endif
-#ifdef HAVE_STDINT_H
-# include <stdint.h>
-#endif
-#ifdef HAVE_UNISTD_H
-# include <unistd.h>
-#endif"
-
-ac_subst_vars='SET_MAKE
-LTLIBOBJS
-LIBOBJS
-EGREP
-GREP
-CPP
-OBJEXT
-EXEEXT
-ac_ct_CC
-CPPFLAGS
-LDFLAGS
-CFLAGS
-CC
-target_alias
-host_alias
-build_alias
-LIBS
-ECHO_T
-ECHO_N
-ECHO_C
-DEFS
-mandir
-localedir
-libdir
-psdir
-pdfdir
-dvidir
-htmldir
-infodir
-docdir
-oldincludedir
-includedir
-localstatedir
-sharedstatedir
-sysconfdir
-datadir
-datarootdir
-libexecdir
-sbindir
-bindir
-program_transform_name
-prefix
-exec_prefix
-PACKAGE_URL
-PACKAGE_BUGREPORT
-PACKAGE_STRING
-PACKAGE_VERSION
-PACKAGE_TARNAME
-PACKAGE_NAME
-PATH_SEPARATOR
-SHELL'
-ac_subst_files=''
-ac_user_opts='
-enable_option_checking
-with_home
-'
-      ac_precious_vars='build_alias
-host_alias
-target_alias
-CC
-CFLAGS
-LDFLAGS
-LIBS
-CPPFLAGS
-CPP'
-
-
-# Initialize some variables set by options.
-ac_init_help=
-ac_init_version=false
-ac_unrecognized_opts=
-ac_unrecognized_sep=
-# The variables have the same names as the options, with
-# dashes changed to underlines.
-cache_file=/dev/null
-exec_prefix=NONE
-no_create=
-no_recursion=
-prefix=NONE
-program_prefix=NONE
-program_suffix=NONE
-program_transform_name=s,x,x,
-silent=
-site=
-srcdir=
-verbose=
-x_includes=NONE
-x_libraries=NONE
-
-# Installation directory options.
-# These are left unexpanded so users can "make install exec_prefix=/foo"
-# and all the variables that are supposed to be based on exec_prefix
-# by default will actually change.
-# Use braces instead of parens because sh, perl, etc. also accept them.
-# (The list follows the same order as the GNU Coding Standards.)
-bindir='${exec_prefix}/bin'
-sbindir='${exec_prefix}/sbin'
-libexecdir='${exec_prefix}/libexec'
-datarootdir='${prefix}/share'
-datadir='${datarootdir}'
-sysconfdir='${prefix}/etc'
-sharedstatedir='${prefix}/com'
-localstatedir='${prefix}/var'
-includedir='${prefix}/include'
-oldincludedir='/usr/include'
-docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
-infodir='${datarootdir}/info'
-htmldir='${docdir}'
-dvidir='${docdir}'
-pdfdir='${docdir}'
-psdir='${docdir}'
-libdir='${exec_prefix}/lib'
-localedir='${datarootdir}/locale'
-mandir='${datarootdir}/man'
-
-ac_prev=
-ac_dashdash=
-for ac_option
-do
-  # If the previous option needs an argument, assign it.
-  if test -n "$ac_prev"; then
-    eval $ac_prev=\$ac_option
-    ac_prev=
-    continue
-  fi
-
-  case $ac_option in
-  *=*)	ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
-  *)	ac_optarg=yes ;;
-  esac
-
-  # Accept the important Cygnus configure options, so we can diagnose typos.
-
-  case $ac_dashdash$ac_option in
-  --)
-    ac_dashdash=yes ;;
-
-  -bindir | --bindir | --bindi | --bind | --bin | --bi)
-    ac_prev=bindir ;;
-  -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
-    bindir=$ac_optarg ;;
-
-  -build | --build | --buil | --bui | --bu)
-    ac_prev=build_alias ;;
-  -build=* | --build=* | --buil=* | --bui=* | --bu=*)
-    build_alias=$ac_optarg ;;
-
-  -cache-file | --cache-file | --cache-fil | --cache-fi \
-  | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
-    ac_prev=cache_file ;;
-  -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
-  | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
-    cache_file=$ac_optarg ;;
-
-  --config-cache | -C)
-    cache_file=config.cache ;;
-
-  -datadir | --datadir | --datadi | --datad)
-    ac_prev=datadir ;;
-  -datadir=* | --datadir=* | --datadi=* | --datad=*)
-    datadir=$ac_optarg ;;
-
-  -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
-  | --dataroo | --dataro | --datar)
-    ac_prev=datarootdir ;;
-  -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
-  | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
-    datarootdir=$ac_optarg ;;
-
-  -disable-* | --disable-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid feature name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"enable_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval enable_$ac_useropt=no ;;
-
-  -docdir | --docdir | --docdi | --doc | --do)
-    ac_prev=docdir ;;
-  -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
-    docdir=$ac_optarg ;;
-
-  -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
-    ac_prev=dvidir ;;
-  -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
-    dvidir=$ac_optarg ;;
-
-  -enable-* | --enable-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid feature name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"enable_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval enable_$ac_useropt=\$ac_optarg ;;
-
-  -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
-  | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
-  | --exec | --exe | --ex)
-    ac_prev=exec_prefix ;;
-  -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
-  | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
-  | --exec=* | --exe=* | --ex=*)
-    exec_prefix=$ac_optarg ;;
-
-  -gas | --gas | --ga | --g)
-    # Obsolete; use --with-gas.
-    with_gas=yes ;;
-
-  -help | --help | --hel | --he | -h)
-    ac_init_help=long ;;
-  -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
-    ac_init_help=recursive ;;
-  -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
-    ac_init_help=short ;;
-
-  -host | --host | --hos | --ho)
-    ac_prev=host_alias ;;
-  -host=* | --host=* | --hos=* | --ho=*)
-    host_alias=$ac_optarg ;;
-
-  -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
-    ac_prev=htmldir ;;
-  -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
-  | --ht=*)
-    htmldir=$ac_optarg ;;
-
-  -includedir | --includedir | --includedi | --included | --include \
-  | --includ | --inclu | --incl | --inc)
-    ac_prev=includedir ;;
-  -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
-  | --includ=* | --inclu=* | --incl=* | --inc=*)
-    includedir=$ac_optarg ;;
-
-  -infodir | --infodir | --infodi | --infod | --info | --inf)
-    ac_prev=infodir ;;
-  -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
-    infodir=$ac_optarg ;;
-
-  -libdir | --libdir | --libdi | --libd)
-    ac_prev=libdir ;;
-  -libdir=* | --libdir=* | --libdi=* | --libd=*)
-    libdir=$ac_optarg ;;
-
-  -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
-  | --libexe | --libex | --libe)
-    ac_prev=libexecdir ;;
-  -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
-  | --libexe=* | --libex=* | --libe=*)
-    libexecdir=$ac_optarg ;;
-
-  -localedir | --localedir | --localedi | --localed | --locale)
-    ac_prev=localedir ;;
-  -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
-    localedir=$ac_optarg ;;
-
-  -localstatedir | --localstatedir | --localstatedi | --localstated \
-  | --localstate | --localstat | --localsta | --localst | --locals)
-    ac_prev=localstatedir ;;
-  -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
-  | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
-    localstatedir=$ac_optarg ;;
-
-  -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
-    ac_prev=mandir ;;
-  -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
-    mandir=$ac_optarg ;;
-
-  -nfp | --nfp | --nf)
-    # Obsolete; use --without-fp.
-    with_fp=no ;;
-
-  -no-create | --no-create | --no-creat | --no-crea | --no-cre \
-  | --no-cr | --no-c | -n)
-    no_create=yes ;;
-
-  -no-recursion | --no-recursion | --no-recursio | --no-recursi \
-  | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
-    no_recursion=yes ;;
-
-  -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
-  | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
-  | --oldin | --oldi | --old | --ol | --o)
-    ac_prev=oldincludedir ;;
-  -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
-  | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
-  | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
-    oldincludedir=$ac_optarg ;;
-
-  -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
-    ac_prev=prefix ;;
-  -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
-    prefix=$ac_optarg ;;
-
-  -program-prefix | --program-prefix | --program-prefi | --program-pref \
-  | --program-pre | --program-pr | --program-p)
-    ac_prev=program_prefix ;;
-  -program-prefix=* | --program-prefix=* | --program-prefi=* \
-  | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
-    program_prefix=$ac_optarg ;;
-
-  -program-suffix | --program-suffix | --program-suffi | --program-suff \
-  | --program-suf | --program-su | --program-s)
-    ac_prev=program_suffix ;;
-  -program-suffix=* | --program-suffix=* | --program-suffi=* \
-  | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
-    program_suffix=$ac_optarg ;;
-
-  -program-transform-name | --program-transform-name \
-  | --program-transform-nam | --program-transform-na \
-  | --program-transform-n | --program-transform- \
-  | --program-transform | --program-transfor \
-  | --program-transfo | --program-transf \
-  | --program-trans | --program-tran \
-  | --progr-tra | --program-tr | --program-t)
-    ac_prev=program_transform_name ;;
-  -program-transform-name=* | --program-transform-name=* \
-  | --program-transform-nam=* | --program-transform-na=* \
-  | --program-transform-n=* | --program-transform-=* \
-  | --program-transform=* | --program-transfor=* \
-  | --program-transfo=* | --program-transf=* \
-  | --program-trans=* | --program-tran=* \
-  | --progr-tra=* | --program-tr=* | --program-t=*)
-    program_transform_name=$ac_optarg ;;
-
-  -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
-    ac_prev=pdfdir ;;
-  -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
-    pdfdir=$ac_optarg ;;
-
-  -psdir | --psdir | --psdi | --psd | --ps)
-    ac_prev=psdir ;;
-  -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
-    psdir=$ac_optarg ;;
-
-  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-  | -silent | --silent | --silen | --sile | --sil)
-    silent=yes ;;
-
-  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
-    ac_prev=sbindir ;;
-  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
-  | --sbi=* | --sb=*)
-    sbindir=$ac_optarg ;;
-
-  -sharedstatedir | --sharedstatedir | --sharedstatedi \
-  | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
-  | --sharedst | --shareds | --shared | --share | --shar \
-  | --sha | --sh)
-    ac_prev=sharedstatedir ;;
-  -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
-  | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
-  | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
-  | --sha=* | --sh=*)
-    sharedstatedir=$ac_optarg ;;
-
-  -site | --site | --sit)
-    ac_prev=site ;;
-  -site=* | --site=* | --sit=*)
-    site=$ac_optarg ;;
-
-  -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
-    ac_prev=srcdir ;;
-  -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
-    srcdir=$ac_optarg ;;
-
-  -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
-  | --syscon | --sysco | --sysc | --sys | --sy)
-    ac_prev=sysconfdir ;;
-  -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
-  | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
-    sysconfdir=$ac_optarg ;;
-
-  -target | --target | --targe | --targ | --tar | --ta | --t)
-    ac_prev=target_alias ;;
-  -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
-    target_alias=$ac_optarg ;;
-
-  -v | -verbose | --verbose | --verbos | --verbo | --verb)
-    verbose=yes ;;
-
-  -version | --version | --versio | --versi | --vers | -V)
-    ac_init_version=: ;;
-
-  -with-* | --with-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid package name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"with_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval with_$ac_useropt=\$ac_optarg ;;
-
-  -without-* | --without-*)
-    ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
-    # Reject names that are not valid shell variable names.
-    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
-      as_fn_error "invalid package name: $ac_useropt"
-    ac_useropt_orig=$ac_useropt
-    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
-    case $ac_user_opts in
-      *"
-"with_$ac_useropt"
-"*) ;;
-      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
-	 ac_unrecognized_sep=', ';;
-    esac
-    eval with_$ac_useropt=no ;;
-
-  --x)
-    # Obsolete; use --with-x.
-    with_x=yes ;;
-
-  -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
-  | --x-incl | --x-inc | --x-in | --x-i)
-    ac_prev=x_includes ;;
-  -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
-  | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
-    x_includes=$ac_optarg ;;
-
-  -x-libraries | --x-libraries | --x-librarie | --x-librari \
-  | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
-    ac_prev=x_libraries ;;
-  -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
-  | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
-    x_libraries=$ac_optarg ;;
-
-  -*) as_fn_error "unrecognized option: \`$ac_option'
-Try \`$0 --help' for more information."
-    ;;
-
-  *=*)
-    ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
-    # Reject names that are not valid shell variable names.
-    case $ac_envvar in #(
-      '' | [0-9]* | *[!_$as_cr_alnum]* )
-      as_fn_error "invalid variable name: \`$ac_envvar'" ;;
-    esac
-    eval $ac_envvar=\$ac_optarg
-    export $ac_envvar ;;
-
-  *)
-    # FIXME: should be removed in autoconf 3.0.
-    $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
-    expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
-      $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
-    : ${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}
-    ;;
-
-  esac
-done
-
-if test -n "$ac_prev"; then
-  ac_option=--`echo $ac_prev | sed 's/_/-/g'`
-  as_fn_error "missing argument to $ac_option"
-fi
-
-if test -n "$ac_unrecognized_opts"; then
-  case $enable_option_checking in
-    no) ;;
-    fatal) as_fn_error "unrecognized options: $ac_unrecognized_opts" ;;
-    *)     $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
-  esac
-fi
-
-# Check all directory arguments for consistency.
-for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
-		datadir sysconfdir sharedstatedir localstatedir includedir \
-		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
-		libdir localedir mandir
-do
-  eval ac_val=\$$ac_var
-  # Remove trailing slashes.
-  case $ac_val in
-    */ )
-      ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
-      eval $ac_var=\$ac_val;;
-  esac
-  # Be sure to have absolute directory names.
-  case $ac_val in
-    [\\/$]* | ?:[\\/]* )  continue;;
-    NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
-  esac
-  as_fn_error "expected an absolute directory name for --$ac_var: $ac_val"
-done
-
-# There might be people who depend on the old broken behavior: `$host'
-# used to hold the argument of --host etc.
-# FIXME: To remove some day.
-build=$build_alias
-host=$host_alias
-target=$target_alias
-
-# FIXME: To remove some day.
-if test "x$host_alias" != x; then
-  if test "x$build_alias" = x; then
-    cross_compiling=maybe
-    $as_echo "$as_me: WARNING: If you wanted to set the --build type, don't use --host.
-    If a cross compiler is detected then cross compile mode will be used." >&2
-  elif test "x$build_alias" != "x$host_alias"; then
-    cross_compiling=yes
-  fi
-fi
-
-ac_tool_prefix=
-test -n "$host_alias" && ac_tool_prefix=$host_alias-
-
-test "$silent" = yes && exec 6>/dev/null
-
-
-ac_pwd=`pwd` && test -n "$ac_pwd" &&
-ac_ls_di=`ls -di .` &&
-ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
-  as_fn_error "working directory cannot be determined"
-test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
-  as_fn_error "pwd does not report name of working directory"
-
-
-# Find the source files, if location was not specified.
-if test -z "$srcdir"; then
-  ac_srcdir_defaulted=yes
-  # Try the directory containing this script, then the parent directory.
-  ac_confdir=`$as_dirname -- "$as_myself" ||
-$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$as_myself" : 'X\(//\)[^/]' \| \
-	 X"$as_myself" : 'X\(//\)$' \| \
-	 X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$as_myself" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-  srcdir=$ac_confdir
-  if test ! -r "$srcdir/$ac_unique_file"; then
-    srcdir=..
-  fi
-else
-  ac_srcdir_defaulted=no
-fi
-if test ! -r "$srcdir/$ac_unique_file"; then
-  test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
-  as_fn_error "cannot find sources ($ac_unique_file) in $srcdir"
-fi
-ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
-ac_abs_confdir=`(
-	cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error "$ac_msg"
-	pwd)`
-# When building in place, set srcdir=.
-if test "$ac_abs_confdir" = "$ac_pwd"; then
-  srcdir=.
-fi
-# Remove unnecessary trailing slashes from srcdir.
-# Double slashes in file names in object file debugging info
-# mess up M-x gdb in Emacs.
-case $srcdir in
-*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
-esac
-for ac_var in $ac_precious_vars; do
-  eval ac_env_${ac_var}_set=\${${ac_var}+set}
-  eval ac_env_${ac_var}_value=\$${ac_var}
-  eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
-  eval ac_cv_env_${ac_var}_value=\$${ac_var}
-done
-
-#
-# Report the --help message.
-#
-if test "$ac_init_help" = "long"; then
-  # Omit some internal or obsolete options to make the list less imposing.
-  # This message is too long to be a string in the A/UX 3.1 sh.
-  cat <<_ACEOF
-\`configure' configures runAs 0.1 to adapt to many kinds of systems.
-
-Usage: $0 [OPTION]... [VAR=VALUE]...
-
-To assign environment variables (e.g., CC, CFLAGS...), specify them as
-VAR=VALUE.  See below for descriptions of some of the useful variables.
-
-Defaults for the options are specified in brackets.
-
-Configuration:
-  -h, --help              display this help and exit
-      --help=short        display options specific to this package
-      --help=recursive    display the short help of all the included packages
-  -V, --version           display version information and exit
-  -q, --quiet, --silent   do not print \`checking...' messages
-      --cache-file=FILE   cache test results in FILE [disabled]
-  -C, --config-cache      alias for \`--cache-file=config.cache'
-  -n, --no-create         do not create output files
-      --srcdir=DIR        find the sources in DIR [configure dir or \`..']
-
-Installation directories:
-  --prefix=PREFIX         install architecture-independent files in PREFIX
-                          [$ac_default_prefix]
-  --exec-prefix=EPREFIX   install architecture-dependent files in EPREFIX
-                          [PREFIX]
-
-By default, \`make install' will install all the files in
-\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc.  You can specify
-an installation prefix other than \`$ac_default_prefix' using \`--prefix',
-for instance \`--prefix=\$HOME'.
-
-For better control, use the options below.
-
-Fine tuning of the installation directories:
-  --bindir=DIR            user executables [EPREFIX/bin]
-  --sbindir=DIR           system admin executables [EPREFIX/sbin]
-  --libexecdir=DIR        program executables [EPREFIX/libexec]
-  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
-  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
-  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
-  --libdir=DIR            object code libraries [EPREFIX/lib]
-  --includedir=DIR        C header files [PREFIX/include]
-  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
-  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
-  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
-  --infodir=DIR           info documentation [DATAROOTDIR/info]
-  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
-  --mandir=DIR            man documentation [DATAROOTDIR/man]
-  --docdir=DIR            documentation root [DATAROOTDIR/doc/runas]
-  --htmldir=DIR           html documentation [DOCDIR]
-  --dvidir=DIR            dvi documentation [DOCDIR]
-  --pdfdir=DIR            pdf documentation [DOCDIR]
-  --psdir=DIR             ps documentation [DOCDIR]
-_ACEOF
-
-  cat <<\_ACEOF
-_ACEOF
-fi
-
-if test -n "$ac_init_help"; then
-  case $ac_init_help in
-     short | recursive ) echo "Configuration of runAs 0.1:";;
-   esac
-  cat <<\_ACEOF
-
-Optional Packages:
-  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
-  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
-  --with-home=DIR         path to the Hadoop home directory
-
-Some influential environment variables:
-  CC          C compiler command
-  CFLAGS      C compiler flags
-  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
-              nonstandard directory <lib dir>
-  LIBS        libraries to pass to the linker, e.g. -l<library>
-  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
-              you have headers in a nonstandard directory <include dir>
-  CPP         C preprocessor
-
-Use these variables to override the choices made by `configure' or to help
-it to find libraries and programs with nonstandard names/locations.
-
-Report bugs to the package provider.
-_ACEOF
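-
-# Illustrative invocation matching the help text above (the paths and flags
-# are hypothetical examples, not defaults of this script):
-#
-#   ./configure --prefix=$HOME/runas --with-home=/usr/lib/hadoop \
-#       CC=gcc CFLAGS='-O2 -g'
-#
-# Environment variables such as CC and CFLAGS are passed as VAR=VALUE pairs.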
-ac_status=$?
-fi
-
-if test "$ac_init_help" = "recursive"; then
-  # If there are subdirs, report their specific --help.
-  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
-    test -d "$ac_dir" ||
-      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
-      continue
-    ac_builddir=.
-
-case "$ac_dir" in
-.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
-*)
-  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
-  # A ".." for each directory in $ac_dir_suffix.
-  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
-  case $ac_top_builddir_sub in
-  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
-  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
-  esac ;;
-esac
-ac_abs_top_builddir=$ac_pwd
-ac_abs_builddir=$ac_pwd$ac_dir_suffix
-# for backward compatibility:
-ac_top_builddir=$ac_top_build_prefix
-
-case $srcdir in
-  .)  # We are building in place.
-    ac_srcdir=.
-    ac_top_srcdir=$ac_top_builddir_sub
-    ac_abs_top_srcdir=$ac_pwd ;;
-  [\\/]* | ?:[\\/]* )  # Absolute name.
-    ac_srcdir=$srcdir$ac_dir_suffix;
-    ac_top_srcdir=$srcdir
-    ac_abs_top_srcdir=$srcdir ;;
-  *) # Relative name.
-    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
-    ac_top_srcdir=$ac_top_build_prefix$srcdir
-    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
-esac
-ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
-
-    cd "$ac_dir" || { ac_status=$?; continue; }
-    # Check for guested configure.
-    if test -f "$ac_srcdir/configure.gnu"; then
-      echo &&
-      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
-    elif test -f "$ac_srcdir/configure"; then
-      echo &&
-      $SHELL "$ac_srcdir/configure" --help=recursive
-    else
-      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
-    fi || ac_status=$?
-    cd "$ac_pwd" || { ac_status=$?; break; }
-  done
-fi
-
-test -n "$ac_init_help" && exit $ac_status
-if $ac_init_version; then
-  cat <<\_ACEOF
-runAs configure 0.1
-generated by GNU Autoconf 2.65
-
-Copyright (C) 2009 Free Software Foundation, Inc.
-This configure script is free software; the Free Software Foundation
-gives unlimited permission to copy, distribute and modify it.
-_ACEOF
-  exit
-fi
-
-## ------------------------ ##
-## Autoconf initialization. ##
-## ------------------------ ##
-
-# ac_fn_c_try_compile LINENO
-# --------------------------
-# Try to compile conftest.$ac_ext, and return whether this succeeded.
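-# A typical call site later in this script looks like:
-#   if ac_fn_c_try_compile "$LINENO"; then :
-#     ac_header_compiler=yes
-#   else
-#     ac_header_compiler=no
-#   fi
-# The function's exit status reports whether the compilation succeeded.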
-ac_fn_c_try_compile ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  rm -f conftest.$ac_objext
-  if { { ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_compile") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest.$ac_objext; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_retval=1
-fi
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_compile
-
-# ac_fn_c_try_cpp LINENO
-# ----------------------
-# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
-ac_fn_c_try_cpp ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  if { { ac_try="$ac_cpp conftest.$ac_ext"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } >/dev/null && {
-	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-    ac_retval=1
-fi
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_cpp
-
-# ac_fn_c_try_run LINENO
-# ----------------------
-# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
-# that executables *can* be run.
-ac_fn_c_try_run ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
-  { { case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_try") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: program exited with status $ac_status" >&5
-       $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-       ac_retval=$ac_status
-fi
-  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_run
-
-# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
-# -------------------------------------------------------
-# Tests whether HEADER exists, giving a warning if it cannot be compiled using
-# the include files in INCLUDES and setting the cache variable VAR
-# accordingly.
-ac_fn_c_check_header_mongrel ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-else
-  # Is the header compilable?
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
-$as_echo_n "checking $2 usability... " >&6; }
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-#include <$2>
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_header_compiler=yes
-else
-  ac_header_compiler=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
-$as_echo "$ac_header_compiler" >&6; }
-
-# Is the header present?
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
-$as_echo_n "checking $2 presence... " >&6; }
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <$2>
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-  ac_header_preproc=yes
-else
-  ac_header_preproc=no
-fi
-rm -f conftest.err conftest.$ac_ext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
-$as_echo "$ac_header_preproc" >&6; }
-
-# So?  What about this header?
-case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #((
-  yes:no: )
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
-$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
-$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
-    ;;
-  no:yes:* )
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
-$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     check for missing prerequisite headers?" >&5
-$as_echo "$as_me: WARNING: $2:     check for missing prerequisite headers?" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
-$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&5
-$as_echo "$as_me: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&2;}
-    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
-$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
-    ;;
-esac
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  eval "$3=\$ac_header_compiler"
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-fi
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_header_mongrel
-
-# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
-# -------------------------------------------------------
-# Tests whether HEADER exists and can be compiled using the include files in
-# INCLUDES, setting the cache variable VAR accordingly.
-ac_fn_c_check_header_compile ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-#include <$2>
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  eval "$3=yes"
-else
-  eval "$3=no"
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_header_compile
-
-# ac_fn_c_check_type LINENO TYPE VAR INCLUDES
-# -------------------------------------------
-# Tests whether TYPE exists after having included INCLUDES, setting cache
-# variable VAR accordingly.
-ac_fn_c_check_type ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  eval "$3=no"
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-int
-main ()
-{
-if (sizeof ($2))
-	 return 0;
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$4
-int
-main ()
-{
-if (sizeof (($2)))
-	    return 0;
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-
-else
-  eval "$3=yes"
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_type
-
-# ac_fn_c_try_link LINENO
-# -----------------------
-# Try to link conftest.$ac_ext, and return whether this succeeded.
-ac_fn_c_try_link ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  rm -f conftest.$ac_objext conftest$ac_exeext
-  if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    grep -v '^ *+' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-    mv -f conftest.er1 conftest.err
-  fi
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; } && {
-	 test -z "$ac_c_werror_flag" ||
-	 test ! -s conftest.err
-       } && test -s conftest$ac_exeext && {
-	 test "$cross_compiling" = yes ||
-	 $as_test_x conftest$ac_exeext
-       }; then :
-  ac_retval=0
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-	ac_retval=1
-fi
-  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
-  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
-  # interfere with the next link command; also delete a directory that is
-  # left behind by Apple's compiler.  We do this before executing the actions.
-  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-  as_fn_set_status $ac_retval
-
-} # ac_fn_c_try_link
-
-# ac_fn_c_check_func LINENO FUNC VAR
-# ----------------------------------
-# Tests whether FUNC exists, setting the cache variable VAR accordingly
-ac_fn_c_check_func ()
-{
-  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
-$as_echo_n "checking for $2... " >&6; }
-if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
-   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
-#define $2 innocuous_$2
-
-/* System header to define __stub macros and hopefully few prototypes,
-    which can conflict with char $2 (); below.
-    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-    <limits.h> exists even on freestanding compilers.  */
-
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-
-#undef $2
-
-/* Override any GCC internal prototype to avoid an error.
-   Use char because int might match the return type of a GCC
-   builtin and then its argument prototype would still apply.  */
-#ifdef __cplusplus
-extern "C"
-#endif
-char $2 ();
-/* The GNU C library defines this for functions which it implements
-    to always fail with ENOSYS.  Some functions are actually named
-    something starting with __ and the normal name is an alias.  */
-#if defined __stub_$2 || defined __stub___$2
-choke me
-#endif
-
-int
-main ()
-{
-return $2 ();
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_link "$LINENO"; then :
-  eval "$3=yes"
-else
-  eval "$3=no"
-fi
-rm -f core conftest.err conftest.$ac_objext \
-    conftest$ac_exeext conftest.$ac_ext
-fi
-eval ac_res=\$$3
-	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
-$as_echo "$ac_res" >&6; }
-  eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;}
-
-} # ac_fn_c_check_func
-cat >config.log <<_ACEOF
-This file contains any messages produced by compilers while
-running configure, to aid debugging if configure makes a mistake.
-
-It was created by runAs $as_me 0.1, which was
-generated by GNU Autoconf 2.65.  Invocation command line was
-
-  $ $0 $@
-
-_ACEOF
-exec 5>>config.log
-{
-cat <<_ASUNAME
-## --------- ##
-## Platform. ##
-## --------- ##
-
-hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
-uname -m = `(uname -m) 2>/dev/null || echo unknown`
-uname -r = `(uname -r) 2>/dev/null || echo unknown`
-uname -s = `(uname -s) 2>/dev/null || echo unknown`
-uname -v = `(uname -v) 2>/dev/null || echo unknown`
-
-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
-/bin/uname -X     = `(/bin/uname -X) 2>/dev/null     || echo unknown`
-
-/bin/arch              = `(/bin/arch) 2>/dev/null              || echo unknown`
-/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null       || echo unknown`
-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
-/usr/bin/hostinfo      = `(/usr/bin/hostinfo) 2>/dev/null      || echo unknown`
-/bin/machine           = `(/bin/machine) 2>/dev/null           || echo unknown`
-/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null       || echo unknown`
-/bin/universe          = `(/bin/universe) 2>/dev/null          || echo unknown`
-
-_ASUNAME
-
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    $as_echo "PATH: $as_dir"
-  done
-IFS=$as_save_IFS
-
-} >&5
-
-cat >&5 <<_ACEOF
-
-
-## ----------- ##
-## Core tests. ##
-## ----------- ##
-
-_ACEOF
-
-
-# Keep a trace of the command line.
-# Strip out --no-create and --no-recursion so they do not pile up.
-# Strip out --silent because we don't want to record it for future runs.
-# Also quote any args containing shell meta-characters.
-# Make two passes to allow for proper duplicate-argument suppression.
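-# As an illustration (the option value is hypothetical): an argument such as
-#   --with-home=/tmp/it's
-# contains a quote, so the sed substitution below stores it in
-# ac_configure_args as
-#   '--with-home=/tmp/it'\''s'
-# which re-parses to the original string when configure is re-run.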
-ac_configure_args=
-ac_configure_args0=
-ac_configure_args1=
-ac_must_keep_next=false
-for ac_pass in 1 2
-do
-  for ac_arg
-  do
-    case $ac_arg in
-    -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
-    -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-    | -silent | --silent | --silen | --sile | --sil)
-      continue ;;
-    *\'*)
-      ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
-    esac
-    case $ac_pass in
-    1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
-    2)
-      as_fn_append ac_configure_args1 " '$ac_arg'"
-      if test $ac_must_keep_next = true; then
-	ac_must_keep_next=false # Got value, back to normal.
-      else
-	case $ac_arg in
-	  *=* | --config-cache | -C | -disable-* | --disable-* \
-	  | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
-	  | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
-	  | -with-* | --with-* | -without-* | --without-* | --x)
-	    case "$ac_configure_args0 " in
-	      "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
-	    esac
-	    ;;
-	  -* ) ac_must_keep_next=true ;;
-	esac
-      fi
-      as_fn_append ac_configure_args " '$ac_arg'"
-      ;;
-    esac
-  done
-done
-{ ac_configure_args0=; unset ac_configure_args0;}
-{ ac_configure_args1=; unset ac_configure_args1;}
-
-# When interrupted or exited, clean up temporary files and complete
-# config.log.  We remove comments because the quotes in them would
-# otherwise cause problems or look ugly.
-# WARNING: Use '\'' to represent an apostrophe within the trap.
-# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
-trap 'exit_status=$?
-  # Save into config.log some information that might help in debugging.
-  {
-    echo
-
-    cat <<\_ASBOX
-## ---------------- ##
-## Cache variables. ##
-## ---------------- ##
-_ASBOX
-    echo
-    # The following way of writing the cache mishandles newlines in values,
-    # but we know of no workaround that is simple, portable, and efficient;
-    # variables containing newlines are killed below instead.
-(
-  for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
-    eval ac_val=\$$ac_var
-    case $ac_val in #(
-    *${as_nl}*)
-      case $ac_var in #(
-      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
-$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
-      esac
-      case $ac_var in #(
-      _ | IFS | as_nl) ;; #(
-      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
-      *) { eval $ac_var=; unset $ac_var;} ;;
-      esac ;;
-    esac
-  done
-  (set) 2>&1 |
-    case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
-    *${as_nl}ac_space=\ *)
-      sed -n \
-	"s/'\''/'\''\\\\'\'''\''/g;
-	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
-      ;; #(
-    *)
-      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
-      ;;
-    esac |
-    sort
-)
-    echo
-
-    cat <<\_ASBOX
-## ----------------- ##
-## Output variables. ##
-## ----------------- ##
-_ASBOX
-    echo
-    for ac_var in $ac_subst_vars
-    do
-      eval ac_val=\$$ac_var
-      case $ac_val in
-      *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
-      esac
-      $as_echo "$ac_var='\''$ac_val'\''"
-    done | sort
-    echo
-
-    if test -n "$ac_subst_files"; then
-      cat <<\_ASBOX
-## ------------------- ##
-## File substitutions. ##
-## ------------------- ##
-_ASBOX
-      echo
-      for ac_var in $ac_subst_files
-      do
-	eval ac_val=\$$ac_var
-	case $ac_val in
-	*\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
-	esac
-	$as_echo "$ac_var='\''$ac_val'\''"
-      done | sort
-      echo
-    fi
-
-    if test -s confdefs.h; then
-      cat <<\_ASBOX
-## ----------- ##
-## confdefs.h. ##
-## ----------- ##
-_ASBOX
-      echo
-      cat confdefs.h
-      echo
-    fi
-    test "$ac_signal" != 0 &&
-      $as_echo "$as_me: caught signal $ac_signal"
-    $as_echo "$as_me: exit $exit_status"
-  } >&5
-  rm -f core *.core core.conftest.* &&
-    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
-    exit $exit_status
-' 0
-for ac_signal in 1 2 13 15; do
-  trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
-done
-ac_signal=0
-
-# confdefs.h avoids OS command line length limits that DEFS can exceed.
-rm -f -r conftest* confdefs.h
-
-$as_echo "/* confdefs.h */" > confdefs.h
-
-# Predefined preprocessor variables.
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_NAME "$PACKAGE_NAME"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_VERSION "$PACKAGE_VERSION"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_STRING "$PACKAGE_STRING"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
-_ACEOF
-
-cat >>confdefs.h <<_ACEOF
-#define PACKAGE_URL "$PACKAGE_URL"
-_ACEOF
-
-
-# Let the site file select an alternate cache file if it wants to.
-# Prefer an explicitly selected file to automatically selected ones.
-ac_site_file1=NONE
-ac_site_file2=NONE
-if test -n "$CONFIG_SITE"; then
-  ac_site_file1=$CONFIG_SITE
-elif test "x$prefix" != xNONE; then
-  ac_site_file1=$prefix/share/config.site
-  ac_site_file2=$prefix/etc/config.site
-else
-  ac_site_file1=$ac_default_prefix/share/config.site
-  ac_site_file2=$ac_default_prefix/etc/config.site
-fi
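-# Illustrative usage (the path is hypothetical): a site-wide defaults file
-# can be selected explicitly with
-#   CONFIG_SITE=/opt/site/config.site ./configure
-# otherwise $prefix/share/config.site and $prefix/etc/config.site are tried.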
-for ac_site_file in "$ac_site_file1" "$ac_site_file2"
-do
-  test "x$ac_site_file" = xNONE && continue
-  if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
-    { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
-$as_echo "$as_me: loading site script $ac_site_file" >&6;}
-    sed 's/^/| /' "$ac_site_file" >&5
-    . "$ac_site_file"
-  fi
-done
-
-if test -r "$cache_file"; then
-  # Some versions of bash will fail to source /dev/null (special files
-  # actually), so we avoid doing that.  DJGPP emulates it as a regular file.
-  if test /dev/null != "$cache_file" && test -f "$cache_file"; then
-    { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
-$as_echo "$as_me: loading cache $cache_file" >&6;}
-    case $cache_file in
-      [\\/]* | ?:[\\/]* ) . "$cache_file";;
-      *)                      . "./$cache_file";;
-    esac
-  fi
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
-$as_echo "$as_me: creating cache $cache_file" >&6;}
-  >$cache_file
-fi
-
-# Check that the precious variables saved in the cache have kept the same
-# value.
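-# For example (the compiler names are illustrative): if a previous run cached
-# CC=gcc and this run is started with CC=cc, the mismatch is reported below
-# and configure aborts, advising `make distclean' or removal of $cache_file.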
-ac_cache_corrupted=false
-for ac_var in $ac_precious_vars; do
-  eval ac_old_set=\$ac_cv_env_${ac_var}_set
-  eval ac_new_set=\$ac_env_${ac_var}_set
-  eval ac_old_val=\$ac_cv_env_${ac_var}_value
-  eval ac_new_val=\$ac_env_${ac_var}_value
-  case $ac_old_set,$ac_new_set in
-    set,)
-      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
-$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
-      ac_cache_corrupted=: ;;
-    ,set)
-      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
-$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
-      ac_cache_corrupted=: ;;
-    ,);;
-    *)
-      if test "x$ac_old_val" != "x$ac_new_val"; then
-	# differences in whitespace do not lead to failure.
-	ac_old_val_w=`echo x $ac_old_val`
-	ac_new_val_w=`echo x $ac_new_val`
-	if test "$ac_old_val_w" != "$ac_new_val_w"; then
-	  { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
-$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
-	  ac_cache_corrupted=:
-	else
-	  { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
-$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
-	  eval $ac_var=\$ac_old_val
-	fi
-	{ $as_echo "$as_me:${as_lineno-$LINENO}:   former value:  \`$ac_old_val'" >&5
-$as_echo "$as_me:   former value:  \`$ac_old_val'" >&2;}
-	{ $as_echo "$as_me:${as_lineno-$LINENO}:   current value: \`$ac_new_val'" >&5
-$as_echo "$as_me:   current value: \`$ac_new_val'" >&2;}
-      fi;;
-  esac
-  # Pass precious variables to config.status.
-  if test "$ac_new_set" = set; then
-    case $ac_new_val in
-    *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
-    *) ac_arg=$ac_var=$ac_new_val ;;
-    esac
-    case " $ac_configure_args " in
-      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
-      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
-    esac
-  fi
-done
-if $ac_cache_corrupted; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-  { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
-$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
-  as_fn_error "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
-fi
-## -------------------- ##
-## Main body of script. ##
-## -------------------- ##
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-
-# Change the default prefix value to an empty string so that the binary
-# does not get installed within the system.
-
-
-# Add a new argument: --with-home
-
-# Check whether --with-home was given.
-if test "${with_home+set}" = set; then :
-  withval=$with_home;
-fi
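-# Sketch of the effect (the directory is hypothetical): invoking
-#   ./configure --with-home=/usr/lib/hadoop
-# leaves with_home=/usr/lib/hadoop set for the rest of this script.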
-
-
-ac_config_headers="$ac_config_headers runAs.h"
-
-
-# Checks for programs.
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-if test -n "$ac_tool_prefix"; then
-  # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
-set dummy ${ac_tool_prefix}gcc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="${ac_tool_prefix}gcc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-fi
-if test -z "$ac_cv_prog_CC"; then
-  ac_ct_CC=$CC
-  # Extract the first word of "gcc", so it can be a program name with args.
-set dummy gcc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$ac_ct_CC"; then
-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_ac_ct_CC="gcc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-ac_ct_CC=$ac_cv_prog_ac_ct_CC
-if test -n "$ac_ct_CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
-$as_echo "$ac_ct_CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-  if test "x$ac_ct_CC" = x; then
-    CC=""
-  else
-    case $cross_compiling:$ac_tool_warned in
-yes:)
-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
-ac_tool_warned=yes ;;
-esac
-    CC=$ac_ct_CC
-  fi
-else
-  CC="$ac_cv_prog_CC"
-fi
-
-if test -z "$CC"; then
-          if test -n "$ac_tool_prefix"; then
-    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
-set dummy ${ac_tool_prefix}cc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="${ac_tool_prefix}cc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-  fi
-fi
-if test -z "$CC"; then
-  # Extract the first word of "cc", so it can be a program name with args.
-set dummy cc; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-  ac_prog_rejected=no
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
-       ac_prog_rejected=yes
-       continue
-     fi
-    ac_cv_prog_CC="cc"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-if test $ac_prog_rejected = yes; then
-  # We found a bogon in the path, so make sure we never use it.
-  set dummy $ac_cv_prog_CC
-  shift
-  if test $# != 0; then
-    # We chose a different compiler from the bogus one.
-    # However, it has the same basename, so the bogon will be chosen
-    # first if we set CC to just the basename; use the full file name.
-    shift
-    ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
-  fi
-fi
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-fi
-if test -z "$CC"; then
-  if test -n "$ac_tool_prefix"; then
-  for ac_prog in cl.exe
-  do
-    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
-set dummy $ac_tool_prefix$ac_prog; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$CC"; then
-  ac_cv_prog_CC="$CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-CC=$ac_cv_prog_CC
-if test -n "$CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
-$as_echo "$CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-    test -n "$CC" && break
-  done
-fi
-if test -z "$CC"; then
-  ac_ct_CC=$CC
-  for ac_prog in cl.exe
-do
-  # Extract the first word of "$ac_prog", so it can be a program name with args.
-set dummy $ac_prog; ac_word=$2
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
-$as_echo_n "checking for $ac_word... " >&6; }
-if test "${ac_cv_prog_ac_ct_CC+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -n "$ac_ct_CC"; then
-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
-else
-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_exec_ext in '' $ac_executable_extensions; do
-  if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then
-    ac_cv_prog_ac_ct_CC="$ac_prog"
-    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
-    break 2
-  fi
-done
-  done
-IFS=$as_save_IFS
-
-fi
-fi
-ac_ct_CC=$ac_cv_prog_ac_ct_CC
-if test -n "$ac_ct_CC"; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
-$as_echo "$ac_ct_CC" >&6; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-fi
-
-
-  test -n "$ac_ct_CC" && break
-done
-
-  if test "x$ac_ct_CC" = x; then
-    CC=""
-  else
-    case $cross_compiling:$ac_tool_warned in
-yes:)
-{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
-$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
-ac_tool_warned=yes ;;
-esac
-    CC=$ac_ct_CC
-  fi
-fi
-
-fi
-
-
-test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "no acceptable C compiler found in \$PATH
-See \`config.log' for more details." "$LINENO" 5; }
-
-# Provide some information about the compiler.
-$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
-set X $ac_compile
-ac_compiler=$2
-for ac_option in --version -v -V -qversion; do
-  { { ac_try="$ac_compiler $ac_option >&5"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
-  ac_status=$?
-  if test -s conftest.err; then
-    sed '10a\
-... rest of stderr output deleted ...
-         10q' conftest.err >conftest.er1
-    cat conftest.er1 >&5
-  fi
-  rm -f conftest.er1 conftest.err
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }
-done
-
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-ac_clean_files_save=$ac_clean_files
-ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
-# Try to create an executable without -o first; disregard a.out.
-# This helps us diagnose broken compilers and gives us an intuition
-# about exeext.
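-# For instance, on a Cygwin or MinGW host (hypothetical for this script) the
-# default output is conftest.exe rather than a.out, which is how ac_cv_exeext
-# would be inferred as `.exe' below.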
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
-$as_echo_n "checking whether the C compiler works... " >&6; }
-ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
-
-# The possible output files:
-ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
-
-ac_rmfiles=
-for ac_file in $ac_files
-do
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
-    * ) ac_rmfiles="$ac_rmfiles $ac_file";;
-  esac
-done
-rm -f $ac_rmfiles
-
-if { { ac_try="$ac_link_default"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link_default") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; then :
-  # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
-# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
-# in a Makefile.  We should not override ac_cv_exeext if it was cached,
-# so that the user can short-circuit this test for compilers unknown to
-# Autoconf.
-for ac_file in $ac_files ''
-do
-  test -f "$ac_file" || continue
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
-	;;
-    [ab].out )
-	# We found the default executable, but exeext='' is most
-	# certainly right.
-	break;;
-    *.* )
-	if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
-	then :; else
-	   ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
-	fi
-	# We set ac_cv_exeext here because the later test for it is not
-	# safe: cross compilers may not add the suffix if given an `-o'
-	# argument, so we may need to know it at that point already.
-	# Even if this section looks crufty: it has the advantage of
-	# actually working.
-	break;;
-    * )
-	break;;
-  esac
-done
-test "$ac_cv_exeext" = no && ac_cv_exeext=
-
-else
-  ac_file=''
-fi
-if test -z "$ac_file"; then :
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-$as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-{ as_fn_set_status 77
-as_fn_error "C compiler cannot create executables
-See \`config.log' for more details." "$LINENO" 5; }; }
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
-$as_echo_n "checking for C compiler default output file name... " >&6; }
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
-$as_echo "$ac_file" >&6; }
-ac_exeext=$ac_cv_exeext
-
-rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
-ac_clean_files=$ac_clean_files_save
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
-$as_echo_n "checking for suffix of executables... " >&6; }
-if { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; then :
-  # If both `conftest.exe' and `conftest' are `present' (well, observable)
-# catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
-# work properly (i.e., refer to `conftest.exe'), while it won't with
-# `rm'.
-for ac_file in conftest.exe conftest conftest.*; do
-  test -f "$ac_file" || continue
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
-    *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
-	  break;;
-    * ) break;;
-  esac
-done
-else
-  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot compute suffix of executables: cannot compile and link
-See \`config.log' for more details." "$LINENO" 5; }
-fi
-rm -f conftest conftest$ac_cv_exeext
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
-$as_echo "$ac_cv_exeext" >&6; }
-
-rm -f conftest.$ac_ext
-EXEEXT=$ac_cv_exeext
-ac_exeext=$EXEEXT
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdio.h>
-int
-main ()
-{
-FILE *f = fopen ("conftest.out", "w");
- return ferror (f) || fclose (f) != 0;
-
-  ;
-  return 0;
-}
-_ACEOF
-ac_clean_files="$ac_clean_files conftest.out"
-# Check that the compiler produces executables we can run.  If not, either
-# the compiler is broken, or we cross compile.
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
-$as_echo_n "checking whether we are cross compiling... " >&6; }
-if test "$cross_compiling" != yes; then
-  { { ac_try="$ac_link"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_link") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }
-  if { ac_try='./conftest$ac_cv_exeext'
-  { { case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_try") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; }; then
-    cross_compiling=no
-  else
-    if test "$cross_compiling" = maybe; then
-	cross_compiling=yes
-    else
-	{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot run C compiled programs.
-If you meant to cross compile, use \`--host'.
-See \`config.log' for more details." "$LINENO" 5; }
-    fi
-  fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
-$as_echo "$cross_compiling" >&6; }
-
-rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
-ac_clean_files=$ac_clean_files_save
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
-$as_echo_n "checking for suffix of object files... " >&6; }
-if test "${ac_cv_objext+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-rm -f conftest.o conftest.obj
-if { { ac_try="$ac_compile"
-case "(($ac_try" in
-  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
-  *) ac_try_echo=$ac_try;;
-esac
-eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
-$as_echo "$ac_try_echo"; } >&5
-  (eval "$ac_compile") 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; then :
-  for ac_file in conftest.o conftest.obj conftest.*; do
-  test -f "$ac_file" || continue;
-  case $ac_file in
-    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
-    *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
-       break;;
-  esac
-done
-else
-  $as_echo "$as_me: failed program was:" >&5
-sed 's/^/| /' conftest.$ac_ext >&5
-
-{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "cannot compute suffix of object files: cannot compile
-See \`config.log' for more details." "$LINENO" 5; }
-fi
-rm -f conftest.$ac_cv_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
-$as_echo "$ac_cv_objext" >&6; }
-OBJEXT=$ac_cv_objext
-ac_objext=$OBJEXT
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
-$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
-if test "${ac_cv_c_compiler_gnu+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-#ifndef __GNUC__
-       choke me
-#endif
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_compiler_gnu=yes
-else
-  ac_compiler_gnu=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-ac_cv_c_compiler_gnu=$ac_compiler_gnu
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
-$as_echo "$ac_cv_c_compiler_gnu" >&6; }
-if test $ac_compiler_gnu = yes; then
-  GCC=yes
-else
-  GCC=
-fi
-ac_test_CFLAGS=${CFLAGS+set}
-ac_save_CFLAGS=$CFLAGS
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
-$as_echo_n "checking whether $CC accepts -g... " >&6; }
-if test "${ac_cv_prog_cc_g+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  ac_save_c_werror_flag=$ac_c_werror_flag
-   ac_c_werror_flag=yes
-   ac_cv_prog_cc_g=no
-   CFLAGS="-g"
-   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_prog_cc_g=yes
-else
-  CFLAGS=""
-      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-
-else
-  ac_c_werror_flag=$ac_save_c_werror_flag
-	 CFLAGS="-g"
-	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_prog_cc_g=yes
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-   ac_c_werror_flag=$ac_save_c_werror_flag
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
-$as_echo "$ac_cv_prog_cc_g" >&6; }
-if test "$ac_test_CFLAGS" = set; then
-  CFLAGS=$ac_save_CFLAGS
-elif test $ac_cv_prog_cc_g = yes; then
-  if test "$GCC" = yes; then
-    CFLAGS="-g -O2"
-  else
-    CFLAGS="-g"
-  fi
-else
-  if test "$GCC" = yes; then
-    CFLAGS="-O2"
-  else
-    CFLAGS=
-  fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
-$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
-if test "${ac_cv_prog_cc_c89+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  ac_cv_prog_cc_c89=no
-ac_save_CC=$CC
-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdarg.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-/* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
-struct buf { int x; };
-FILE * (*rcsopen) (struct buf *, struct stat *, int);
-static char *e (p, i)
-     char **p;
-     int i;
-{
-  return p[i];
-}
-static char *f (char * (*g) (char **, int), char **p, ...)
-{
-  char *s;
-  va_list v;
-  va_start (v,p);
-  s = g (p, va_arg (v,int));
-  va_end (v);
-  return s;
-}
-
-/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
-   function prototypes and stuff, but not '\xHH' hex character constants.
-   These don't provoke an error unfortunately, instead are silently treated
-   as 'x'.  The following induces an error, until -std is added to get
-   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
-   array size at least.  It's necessary to write '\x00'==0 to get something
-   that's true only with -std.  */
-int osf4_cc_array ['\x00' == 0 ? 1 : -1];
-
-/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
-   inside strings and character constants.  */
-#define FOO(x) 'x'
-int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
-
-int test (int i, double x);
-struct s1 {int (*f) (int a);};
-struct s2 {int (*f) (double a);};
-int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
-int argc;
-char **argv;
-int
-main ()
-{
-return f (e, argv, 0) != argv[0]  ||  f (e, argv, 1) != argv[1];
-  ;
-  return 0;
-}
-_ACEOF
-for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
-	-Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
-do
-  CC="$ac_save_CC $ac_arg"
-  if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_prog_cc_c89=$ac_arg
-fi
-rm -f core conftest.err conftest.$ac_objext
-  test "x$ac_cv_prog_cc_c89" != "xno" && break
-done
-rm -f conftest.$ac_ext
-CC=$ac_save_CC
-
-fi
-# AC_CACHE_VAL
-case "x$ac_cv_prog_cc_c89" in
-  x)
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
-$as_echo "none needed" >&6; } ;;
-  xno)
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
-$as_echo "unsupported" >&6; } ;;
-  *)
-    CC="$CC $ac_cv_prog_cc_c89"
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
-$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
-esac
-if test "x$ac_cv_prog_cc_c89" != xno; then :
-
-fi
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-# Checks for libraries.
-
-# Checks for header files.
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
-$as_echo_n "checking how to run the C preprocessor... " >&6; }
-# On Suns, sometimes $CPP names a directory.
-if test -n "$CPP" && test -d "$CPP"; then
-  CPP=
-fi
-if test -z "$CPP"; then
-  if test "${ac_cv_prog_CPP+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-      # Double quotes because CPP needs to be expanded
-    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
-    do
-      ac_preproc_ok=false
-for ac_c_preproc_warn_flag in '' yes
-do
-  # Use a header file that comes with gcc, so configuring glibc
-  # with a fresh cross-compiler works.
-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-  # <limits.h> exists even on freestanding compilers.
-  # On the NeXT, cc -E runs the code through the compiler's parser,
-  # not just through cpp. "Syntax error" is here to catch this case.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-		     Syntax error
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-
-else
-  # Broken: fails on valid input.
-continue
-fi
-rm -f conftest.err conftest.$ac_ext
-
-  # OK, works on sane cases.  Now check whether nonexistent headers
-  # can be detected and how.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <ac_nonexistent.h>
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-  # Broken: success on invalid input.
-continue
-else
-  # Passes both tests.
-ac_preproc_ok=:
-break
-fi
-rm -f conftest.err conftest.$ac_ext
-
-done
-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
-if $ac_preproc_ok; then :
-  break
-fi
-
-    done
-    ac_cv_prog_CPP=$CPP
-
-fi
-  CPP=$ac_cv_prog_CPP
-else
-  ac_cv_prog_CPP=$CPP
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
-$as_echo "$CPP" >&6; }
-ac_preproc_ok=false
-for ac_c_preproc_warn_flag in '' yes
-do
-  # Use a header file that comes with gcc, so configuring glibc
-  # with a fresh cross-compiler works.
-  # Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
-  # <limits.h> exists even on freestanding compilers.
-  # On the NeXT, cc -E runs the code through the compiler's parser,
-  # not just through cpp. "Syntax error" is here to catch this case.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#ifdef __STDC__
-# include <limits.h>
-#else
-# include <assert.h>
-#endif
-		     Syntax error
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-
-else
-  # Broken: fails on valid input.
-continue
-fi
-rm -f conftest.err conftest.$ac_ext
-
-  # OK, works on sane cases.  Now check whether nonexistent headers
-  # can be detected and how.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <ac_nonexistent.h>
-_ACEOF
-if ac_fn_c_try_cpp "$LINENO"; then :
-  # Broken: success on invalid input.
-continue
-else
-  # Passes both tests.
-ac_preproc_ok=:
-break
-fi
-rm -f conftest.err conftest.$ac_ext
-
-done
-# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
-rm -f conftest.err conftest.$ac_ext
-if $ac_preproc_ok; then :
-
-else
-  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
-$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
-as_fn_error "C preprocessor \"$CPP\" fails sanity check
-See \`config.log' for more details." "$LINENO" 5; }
-fi
-
-ac_ext=c
-ac_cpp='$CPP $CPPFLAGS'
-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
-ac_compiler_gnu=$ac_cv_c_compiler_gnu
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
-$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
-if test "${ac_cv_path_GREP+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test -z "$GREP"; then
-  ac_path_GREP_found=false
-  # Loop through the user's path and test for each of PROGNAME-LIST
-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_prog in grep ggrep; do
-    for ac_exec_ext in '' $ac_executable_extensions; do
-      ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
-      { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue
-# Check for GNU ac_path_GREP and select it if it is found.
-  # Check for GNU $ac_path_GREP
-case `"$ac_path_GREP" --version 2>&1` in
-*GNU*)
-  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
-*)
-  ac_count=0
-  $as_echo_n 0123456789 >"conftest.in"
-  while :
-  do
-    cat "conftest.in" "conftest.in" >"conftest.tmp"
-    mv "conftest.tmp" "conftest.in"
-    cp "conftest.in" "conftest.nl"
-    $as_echo 'GREP' >> "conftest.nl"
-    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
-    as_fn_arith $ac_count + 1 && ac_count=$as_val
-    if test $ac_count -gt ${ac_path_GREP_max-0}; then
-      # Best one so far, save it but keep looking for a better one
-      ac_cv_path_GREP="$ac_path_GREP"
-      ac_path_GREP_max=$ac_count
-    fi
-    # 10*(2^10) chars as input seems more than enough
-    test $ac_count -gt 10 && break
-  done
-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
-esac
-
-      $ac_path_GREP_found && break 3
-    done
-  done
-  done
-IFS=$as_save_IFS
-  if test -z "$ac_cv_path_GREP"; then
-    as_fn_error "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
-  fi
-else
-  ac_cv_path_GREP=$GREP
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
-$as_echo "$ac_cv_path_GREP" >&6; }
- GREP="$ac_cv_path_GREP"
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
-$as_echo_n "checking for egrep... " >&6; }
-if test "${ac_cv_path_EGREP+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
-   then ac_cv_path_EGREP="$GREP -E"
-   else
-     if test -z "$EGREP"; then
-  ac_path_EGREP_found=false
-  # Loop through the user's path and test for each of PROGNAME-LIST
-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    for ac_prog in egrep; do
-    for ac_exec_ext in '' $ac_executable_extensions; do
-      ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
-      { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue
-# Check for GNU ac_path_EGREP and select it if it is found.
-  # Check for GNU $ac_path_EGREP
-case `"$ac_path_EGREP" --version 2>&1` in
-*GNU*)
-  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
-*)
-  ac_count=0
-  $as_echo_n 0123456789 >"conftest.in"
-  while :
-  do
-    cat "conftest.in" "conftest.in" >"conftest.tmp"
-    mv "conftest.tmp" "conftest.in"
-    cp "conftest.in" "conftest.nl"
-    $as_echo 'EGREP' >> "conftest.nl"
-    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
-    as_fn_arith $ac_count + 1 && ac_count=$as_val
-    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
-      # Best one so far, save it but keep looking for a better one
-      ac_cv_path_EGREP="$ac_path_EGREP"
-      ac_path_EGREP_max=$ac_count
-    fi
-    # 10*(2^10) chars as input seems more than enough
-    test $ac_count -gt 10 && break
-  done
-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
-esac
-
-      $ac_path_EGREP_found && break 3
-    done
-  done
-  done
-IFS=$as_save_IFS
-  if test -z "$ac_cv_path_EGREP"; then
-    as_fn_error "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
-  fi
-else
-  ac_cv_path_EGREP=$EGREP
-fi
-
-   fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
-$as_echo "$ac_cv_path_EGREP" >&6; }
- EGREP="$ac_cv_path_EGREP"
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
-$as_echo_n "checking for ANSI C header files... " >&6; }
-if test "${ac_cv_header_stdc+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <float.h>
-
-int
-main ()
-{
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_header_stdc=yes
-else
-  ac_cv_header_stdc=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-
-if test $ac_cv_header_stdc = yes; then
-  # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <string.h>
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "memchr" >/dev/null 2>&1; then :
-
-else
-  ac_cv_header_stdc=no
-fi
-rm -f conftest*
-
-fi
-
-if test $ac_cv_header_stdc = yes; then
-  # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <stdlib.h>
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "free" >/dev/null 2>&1; then :
-
-else
-  ac_cv_header_stdc=no
-fi
-rm -f conftest*
-
-fi
-
-if test $ac_cv_header_stdc = yes; then
-  # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
-  if test "$cross_compiling" = yes; then :
-  :
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <ctype.h>
-#include <stdlib.h>
-#if ((' ' & 0x0FF) == 0x020)
-# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
-# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
-#else
-# define ISLOWER(c) \
-		   (('a' <= (c) && (c) <= 'i') \
-		     || ('j' <= (c) && (c) <= 'r') \
-		     || ('s' <= (c) && (c) <= 'z'))
-# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
-#endif
-
-#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
-int
-main ()
-{
-  int i;
-  for (i = 0; i < 256; i++)
-    if (XOR (islower (i), ISLOWER (i))
-	|| toupper (i) != TOUPPER (i))
-      return 2;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-
-else
-  ac_cv_header_stdc=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-fi
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
-$as_echo "$ac_cv_header_stdc" >&6; }
-if test $ac_cv_header_stdc = yes; then
-
-$as_echo "#define STDC_HEADERS 1" >>confdefs.h
-
-fi
-
-# On IRIX 5.3, sys/types and inttypes.h are conflicting.
-for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
-		  inttypes.h stdint.h unistd.h
-do :
-  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
-ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
-"
-eval as_val=\$$as_ac_Header
-   if test "x$as_val" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-
-done
-
-
-for ac_header in stdlib.h string.h unistd.h fcntl.h
-do :
-  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
-ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default"
-eval as_val=\$$as_ac_Header
-   if test "x$as_val" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-
-done
-
-
-#check for HADOOP_PREFIX
-if test "$with_home" != ""
-then
-cat >>confdefs.h <<_ACEOF
-#define HADOOP_PREFIX "$with_home"
-_ACEOF
-
-fi
-
-# Checks for typedefs, structures, and compiler characteristics.
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5
-$as_echo_n "checking for an ANSI C-conforming const... " >&6; }
-if test "${ac_cv_c_const+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-int
-main ()
-{
-/* FIXME: Include the comments suggested by Paul. */
-#ifndef __cplusplus
-  /* Ultrix mips cc rejects this.  */
-  typedef int charset[2];
-  const charset cs;
-  /* SunOS 4.1.1 cc rejects this.  */
-  char const *const *pcpcc;
-  char **ppc;
-  /* NEC SVR4.0.2 mips cc rejects this.  */
-  struct point {int x, y;};
-  static struct point const zero = {0,0};
-  /* AIX XL C 1.02.0.0 rejects this.
-     It does not let you subtract one const X* pointer from another in
-     an arm of an if-expression whose if-part is not a constant
-     expression */
-  const char *g = "string";
-  pcpcc = &g + (g ? g-g : 0);
-  /* HPUX 7.0 cc rejects these. */
-  ++pcpcc;
-  ppc = (char**) pcpcc;
-  pcpcc = (char const *const *) ppc;
-  { /* SCO 3.2v4 cc rejects this.  */
-    char *t;
-    char const *s = 0 ? (char *) 0 : (char const *) 0;
-
-    *t++ = 0;
-    if (s) return 0;
-  }
-  { /* Someone thinks the Sun supposedly-ANSI compiler will reject this.  */
-    int x[] = {25, 17};
-    const int *foo = &x[0];
-    ++foo;
-  }
-  { /* Sun SC1.0 ANSI compiler rejects this -- but not the above. */
-    typedef const int *iptr;
-    iptr p = 0;
-    ++p;
-  }
-  { /* AIX XL C 1.02.0.0 rejects this saying
-       "k.c", line 2.27: 1506-025 (S) Operand must be a modifiable lvalue. */
-    struct s { int j; const int *ap[3]; };
-    struct s *b; b->j = 5;
-  }
-  { /* ULTRIX-32 V3.1 (Rev 9) vcc rejects this */
-    const int foo = 10;
-    if (!foo) return 0;
-  }
-  return !cs[0] && !zero.x;
-#endif
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_c_const=yes
-else
-  ac_cv_c_const=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5
-$as_echo "$ac_cv_c_const" >&6; }
-if test $ac_cv_c_const = no; then
-
-$as_echo "#define const /**/" >>confdefs.h
-
-fi
-
-ac_fn_c_check_type "$LINENO" "pid_t" "ac_cv_type_pid_t" "$ac_includes_default"
-if test "x$ac_cv_type_pid_t" = x""yes; then :
-
-else
-
-cat >>confdefs.h <<_ACEOF
-#define pid_t int
-_ACEOF
-
-fi
-
-ac_fn_c_check_type "$LINENO" "mode_t" "ac_cv_type_mode_t" "$ac_includes_default"
-if test "x$ac_cv_type_mode_t" = x""yes; then :
-
-else
-
-cat >>confdefs.h <<_ACEOF
-#define mode_t int
-_ACEOF
-
-fi
-
-ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default"
-if test "x$ac_cv_type_size_t" = x""yes; then :
-
-else
-
-cat >>confdefs.h <<_ACEOF
-#define size_t unsigned int
-_ACEOF
-
-fi
-
-
-# Checks for library functions.
-for ac_header in stdlib.h
-do :
-  ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default"
-if test "x$ac_cv_header_stdlib_h" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_STDLIB_H 1
-_ACEOF
-
-fi
-
-done
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible malloc" >&5
-$as_echo_n "checking for GNU libc compatible malloc... " >&6; }
-if test "${ac_cv_func_malloc_0_nonnull+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test "$cross_compiling" = yes; then :
-  ac_cv_func_malloc_0_nonnull=no
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#if defined STDC_HEADERS || defined HAVE_STDLIB_H
-# include <stdlib.h>
-#else
-char *malloc ();
-#endif
-
-int
-main ()
-{
-return ! malloc (0);
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-  ac_cv_func_malloc_0_nonnull=yes
-else
-  ac_cv_func_malloc_0_nonnull=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_malloc_0_nonnull" >&5
-$as_echo "$ac_cv_func_malloc_0_nonnull" >&6; }
-if test $ac_cv_func_malloc_0_nonnull = yes; then :
-
-$as_echo "#define HAVE_MALLOC 1" >>confdefs.h
-
-else
-  $as_echo "#define HAVE_MALLOC 0" >>confdefs.h
-
-   case " $LIBOBJS " in
-  *" malloc.$ac_objext "* ) ;;
-  *) LIBOBJS="$LIBOBJS malloc.$ac_objext"
- ;;
-esac
-
-
-$as_echo "#define malloc rpl_malloc" >>confdefs.h
-
-fi
-
-
-for ac_header in stdlib.h
-do :
-  ac_fn_c_check_header_mongrel "$LINENO" "stdlib.h" "ac_cv_header_stdlib_h" "$ac_includes_default"
-if test "x$ac_cv_header_stdlib_h" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_STDLIB_H 1
-_ACEOF
-
-fi
-
-done
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU libc compatible realloc" >&5
-$as_echo_n "checking for GNU libc compatible realloc... " >&6; }
-if test "${ac_cv_func_realloc_0_nonnull+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test "$cross_compiling" = yes; then :
-  ac_cv_func_realloc_0_nonnull=no
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#if defined STDC_HEADERS || defined HAVE_STDLIB_H
-# include <stdlib.h>
-#else
-char *realloc ();
-#endif
-
-int
-main ()
-{
-return ! realloc (0, 0);
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-  ac_cv_func_realloc_0_nonnull=yes
-else
-  ac_cv_func_realloc_0_nonnull=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_realloc_0_nonnull" >&5
-$as_echo "$ac_cv_func_realloc_0_nonnull" >&6; }
-if test $ac_cv_func_realloc_0_nonnull = yes; then :
-
-$as_echo "#define HAVE_REALLOC 1" >>confdefs.h
-
-else
-  $as_echo "#define HAVE_REALLOC 0" >>confdefs.h
-
-   case " $LIBOBJS " in
-  *" realloc.$ac_objext "* ) ;;
-  *) LIBOBJS="$LIBOBJS realloc.$ac_objext"
- ;;
-esac
-
-
-$as_echo "#define realloc rpl_realloc" >>confdefs.h
-
-fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for uid_t in sys/types.h" >&5
-$as_echo_n "checking for uid_t in sys/types.h... " >&6; }
-if test "${ac_cv_type_uid_t+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-#include <sys/types.h>
-
-_ACEOF
-if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
-  $EGREP "uid_t" >/dev/null 2>&1; then :
-  ac_cv_type_uid_t=yes
-else
-  ac_cv_type_uid_t=no
-fi
-rm -f conftest*
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_uid_t" >&5
-$as_echo "$ac_cv_type_uid_t" >&6; }
-if test $ac_cv_type_uid_t = no; then
-
-$as_echo "#define uid_t int" >>confdefs.h
-
-
-$as_echo "#define gid_t int" >>confdefs.h
-
-fi
-
-for ac_header in unistd.h
-do :
-  ac_fn_c_check_header_mongrel "$LINENO" "unistd.h" "ac_cv_header_unistd_h" "$ac_includes_default"
-if test "x$ac_cv_header_unistd_h" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_UNISTD_H 1
-_ACEOF
-
-fi
-
-done
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working chown" >&5
-$as_echo_n "checking for working chown... " >&6; }
-if test "${ac_cv_func_chown_works+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  if test "$cross_compiling" = yes; then :
-  ac_cv_func_chown_works=no
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-$ac_includes_default
-#include <fcntl.h>
-
-int
-main ()
-{
-  char *f = "conftest.chown";
-  struct stat before, after;
-
-  if (creat (f, 0600) < 0)
-    return 1;
-  if (stat (f, &before) < 0)
-    return 1;
-  if (chown (f, (uid_t) -1, (gid_t) -1) == -1)
-    return 1;
-  if (stat (f, &after) < 0)
-    return 1;
-  return ! (before.st_uid == after.st_uid && before.st_gid == after.st_gid);
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_run "$LINENO"; then :
-  ac_cv_func_chown_works=yes
-else
-  ac_cv_func_chown_works=no
-fi
-rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
-  conftest.$ac_objext conftest.beam conftest.$ac_ext
-fi
-
-rm -f conftest.chown
-
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_chown_works" >&5
-$as_echo "$ac_cv_func_chown_works" >&6; }
-if test $ac_cv_func_chown_works = yes; then
-
-$as_echo "#define HAVE_CHOWN 1" >>confdefs.h
-
-fi
-
-for ac_func in strerror memset mkdir rmdir strdup
-do :
-  as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
-ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
-eval as_val=\$$as_ac_var
-   if test "x$as_val" = x""yes; then :
-  cat >>confdefs.h <<_ACEOF
-#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1
-_ACEOF
-
-fi
-done
-
-
-ac_config_files="$ac_config_files Makefile"
-
-cat >confcache <<\_ACEOF
-# This file is a shell script that caches the results of configure
-# tests run on this system so they can be shared between configure
-# scripts and configure runs, see configure's option --config-cache.
-# It is not useful on other systems.  If it contains results you don't
-# want to keep, you may remove or edit it.
-#
-# config.status only pays attention to the cache file if you give it
-# the --recheck option to rerun configure.
-#
-# `ac_cv_env_foo' variables (set or unset) will be overridden when
-# loading this file, other *unset* `ac_cv_foo' will be assigned the
-# following values.
-
-_ACEOF
-
-# The following way of writing the cache mishandles newlines in values,
-# but we know of no workaround that is simple, portable, and efficient.
-# So, we kill variables containing newlines.
-# Ultrix sh set writes to stderr and can't be redirected directly,
-# and sets the high bit in the cache file unless we assign to the vars.
-(
-  for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
-    eval ac_val=\$$ac_var
-    case $ac_val in #(
-    *${as_nl}*)
-      case $ac_var in #(
-      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
-$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
-      esac
-      case $ac_var in #(
-      _ | IFS | as_nl) ;; #(
-      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
-      *) { eval $ac_var=; unset $ac_var;} ;;
-      esac ;;
-    esac
-  done
-
-  (set) 2>&1 |
-    case $as_nl`(ac_space=' '; set) 2>&1` in #(
-    *${as_nl}ac_space=\ *)
-      # `set' does not quote correctly, so add quotes: double-quote
-      # substitution turns \\\\ into \\, and sed turns \\ into \.
-      sed -n \
-	"s/'/'\\\\''/g;
-	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
-      ;; #(
-    *)
-      # `set' quotes correctly as required by POSIX, so do not add quotes.
-      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
-      ;;
-    esac |
-    sort
-) |
-  sed '
-     /^ac_cv_env_/b end
-     t clear
-     :clear
-     s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
-     t end
-     s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
-     :end' >>confcache
-if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
-  if test -w "$cache_file"; then
-    test "x$cache_file" != "x/dev/null" &&
-      { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
-$as_echo "$as_me: updating cache $cache_file" >&6;}
-    cat confcache >$cache_file
-  else
-    { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
-$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
-  fi
-fi
-rm -f confcache
-
-test "x$prefix" = xNONE && prefix=$ac_default_prefix
-# Let make expand exec_prefix.
-test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
-
-DEFS=-DHAVE_CONFIG_H
-
-ac_libobjs=
-ac_ltlibobjs=
-for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
-  # 1. Remove the extension, and $U if already installed.
-  ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
-  ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
-  # 2. Prepend LIBOBJDIR.  When used with automake>=1.10 LIBOBJDIR
-  #    will be set to the directory where LIBOBJS objects are built.
-  as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
-  as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
-done
-LIBOBJS=$ac_libobjs
-
-LTLIBOBJS=$ac_ltlibobjs
-
-
-
-: ${CONFIG_STATUS=./config.status}
-ac_write_fail=0
-ac_clean_files_save=$ac_clean_files
-ac_clean_files="$ac_clean_files $CONFIG_STATUS"
-{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
-$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
-as_write_fail=0
-cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
-#! $SHELL
-# Generated by $as_me.
-# Run this file to recreate the current configuration.
-# Compiler output produced by configure, useful for debugging
-# configure, is in config.log if it exists.
-
-debug=false
-ac_cs_recheck=false
-ac_cs_silent=false
-
-SHELL=\${CONFIG_SHELL-$SHELL}
-export SHELL
-_ASEOF
-cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
-## -------------------- ##
-## M4sh Initialization. ##
-## -------------------- ##
-
-# Be more Bourne compatible
-DUALCASE=1; export DUALCASE # for MKS sh
-if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
-  emulate sh
-  NULLCMD=:
-  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
-  # is contrary to our usage.  Disable this feature.
-  alias -g '${1+"$@"}'='"$@"'
-  setopt NO_GLOB_SUBST
-else
-  case `(set -o) 2>/dev/null` in #(
-  *posix*) :
-    set -o posix ;; #(
-  *) :
-     ;;
-esac
-fi
-
-
-as_nl='
-'
-export as_nl
-# Printing a long string crashes Solaris 7 /usr/bin/printf.
-as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
-as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
-# Prefer a ksh shell builtin over an external printf program on Solaris,
-# but without wasting forks for bash or zsh.
-if test -z "$BASH_VERSION$ZSH_VERSION" \
-    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='print -r --'
-  as_echo_n='print -rn --'
-elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
-  as_echo='printf %s\n'
-  as_echo_n='printf %s'
-else
-  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
-    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
-    as_echo_n='/usr/ucb/echo -n'
-  else
-    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
-    as_echo_n_body='eval
-      arg=$1;
-      case $arg in #(
-      *"$as_nl"*)
-	expr "X$arg" : "X\\(.*\\)$as_nl";
-	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
-      esac;
-      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
-    '
-    export as_echo_n_body
-    as_echo_n='sh -c $as_echo_n_body as_echo'
-  fi
-  export as_echo_body
-  as_echo='sh -c $as_echo_body as_echo'
-fi
-
-# The user is always right.
-if test "${PATH_SEPARATOR+set}" != set; then
-  PATH_SEPARATOR=:
-  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
-    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
-      PATH_SEPARATOR=';'
-  }
-fi
-
-
-# IFS
-# We need space, tab and new line, in precisely that order.  Quoting is
-# there to prevent editors from complaining about space-tab.
-# (If _AS_PATH_WALK were called with IFS unset, it would disable word
-# splitting by setting IFS to empty value.)
-IFS=" ""	$as_nl"
-
-# Find who we are.  Look in the path if we contain no directory separator.
-case $0 in #((
-  *[\\/]* ) as_myself=$0 ;;
-  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
-for as_dir in $PATH
-do
-  IFS=$as_save_IFS
-  test -z "$as_dir" && as_dir=.
-    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
-  done
-IFS=$as_save_IFS
-
-     ;;
-esac
-# We did not find ourselves, most probably we were run as `sh COMMAND'
-# in which case we are not to be found in the path.
-if test "x$as_myself" = x; then
-  as_myself=$0
-fi
-if test ! -f "$as_myself"; then
-  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
-  exit 1
-fi
-
-# Unset variables that we do not need and which cause bugs (e.g. in
-# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
-# suppresses any "Segmentation fault" message there.  '((' could
-# trigger a bug in pdksh 5.2.14.
-for as_var in BASH_ENV ENV MAIL MAILPATH
-do eval test x\${$as_var+set} = xset \
-  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
-done
-PS1='$ '
-PS2='> '
-PS4='+ '
-
-# NLS nuisances.
-LC_ALL=C
-export LC_ALL
-LANGUAGE=C
-export LANGUAGE
-
-# CDPATH.
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
-
-
-# as_fn_error ERROR [LINENO LOG_FD]
-# ---------------------------------
-# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
-# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
-# script with status $?, using 1 if that was 0.
-as_fn_error ()
-{
-  as_status=$?; test $as_status -eq 0 && as_status=1
-  if test "$3"; then
-    as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
-    $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3
-  fi
-  $as_echo "$as_me: error: $1" >&2
-  as_fn_exit $as_status
-} # as_fn_error
-
-
-# as_fn_set_status STATUS
-# -----------------------
-# Set $? to STATUS, without forking.
-as_fn_set_status ()
-{
-  return $1
-} # as_fn_set_status
-
-# as_fn_exit STATUS
-# -----------------
-# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
-as_fn_exit ()
-{
-  set +e
-  as_fn_set_status $1
-  exit $1
-} # as_fn_exit
-
-# as_fn_unset VAR
-# ---------------
-# Portably unset VAR.
-as_fn_unset ()
-{
-  { eval $1=; unset $1;}
-}
-as_unset=as_fn_unset
-# as_fn_append VAR VALUE
-# ----------------------
-# Append the text in VALUE to the end of the definition contained in VAR. Take
-# advantage of any shell optimizations that allow amortized linear growth over
-# repeated appends, instead of the typical quadratic growth present in naive
-# implementations.
-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
-  eval 'as_fn_append ()
-  {
-    eval $1+=\$2
-  }'
-else
-  as_fn_append ()
-  {
-    eval $1=\$$1\$2
-  }
-fi # as_fn_append
-
-# as_fn_arith ARG...
-# ------------------
-# Perform arithmetic evaluation on the ARGs, and store the result in the
-# global $as_val. Take advantage of shells that can avoid forks. The arguments
-# must be portable across $(()) and expr.
-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
-  eval 'as_fn_arith ()
-  {
-    as_val=$(( $* ))
-  }'
-else
-  as_fn_arith ()
-  {
-    as_val=`expr "$@" || test $? -eq 1`
-  }
-fi # as_fn_arith
-
-
-if expr a : '\(a\)' >/dev/null 2>&1 &&
-   test "X`expr 00001 : '.*\(...\)'`" = X001; then
-  as_expr=expr
-else
-  as_expr=false
-fi
-
-if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
-  as_basename=basename
-else
-  as_basename=false
-fi
-
-if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
-  as_dirname=dirname
-else
-  as_dirname=false
-fi
-
-as_me=`$as_basename -- "$0" ||
-$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
-	 X"$0" : 'X\(//\)$' \| \
-	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X/"$0" |
-    sed '/^.*\/\([^/][^/]*\)\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\/\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-
-# Avoid depending upon Character Ranges.
-as_cr_letters='abcdefghijklmnopqrstuvwxyz'
-as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
-as_cr_Letters=$as_cr_letters$as_cr_LETTERS
-as_cr_digits='0123456789'
-as_cr_alnum=$as_cr_Letters$as_cr_digits
-
-ECHO_C= ECHO_N= ECHO_T=
-case `echo -n x` in #(((((
--n*)
-  case `echo 'xy\c'` in
-  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
-  xy)  ECHO_C='\c';;
-  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
-       ECHO_T='	';;
-  esac;;
-*)
-  ECHO_N='-n';;
-esac
-
-rm -f conf$$ conf$$.exe conf$$.file
-if test -d conf$$.dir; then
-  rm -f conf$$.dir/conf$$.file
-else
-  rm -f conf$$.dir
-  mkdir conf$$.dir 2>/dev/null
-fi
-if (echo >conf$$.file) 2>/dev/null; then
-  if ln -s conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s='ln -s'
-    # ... but there are two gotchas:
-    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
-    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
-    # In both cases, we have to default to `cp -p'.
-    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
-      as_ln_s='cp -p'
-  elif ln conf$$.file conf$$ 2>/dev/null; then
-    as_ln_s=ln
-  else
-    as_ln_s='cp -p'
-  fi
-else
-  as_ln_s='cp -p'
-fi
-rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
-rmdir conf$$.dir 2>/dev/null
-
-
-# as_fn_mkdir_p
-# -------------
-# Create "$as_dir" as a directory, including parents if necessary.
-as_fn_mkdir_p ()
-{
-
-  case $as_dir in #(
-  -*) as_dir=./$as_dir;;
-  esac
-  test -d "$as_dir" || eval $as_mkdir_p || {
-    as_dirs=
-    while :; do
-      case $as_dir in #(
-      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
-      *) as_qdir=$as_dir;;
-      esac
-      as_dirs="'$as_qdir' $as_dirs"
-      as_dir=`$as_dirname -- "$as_dir" ||
-$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$as_dir" : 'X\(//\)[^/]' \| \
-	 X"$as_dir" : 'X\(//\)$' \| \
-	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$as_dir" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-      test -d "$as_dir" && break
-    done
-    test -z "$as_dirs" || eval "mkdir $as_dirs"
-  } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir"
-
-
-} # as_fn_mkdir_p
-if mkdir -p . 2>/dev/null; then
-  as_mkdir_p='mkdir -p "$as_dir"'
-else
-  test -d ./-p && rmdir ./-p
-  as_mkdir_p=false
-fi
-
-if test -x / >/dev/null 2>&1; then
-  as_test_x='test -x'
-else
-  if ls -dL / >/dev/null 2>&1; then
-    as_ls_L_option=L
-  else
-    as_ls_L_option=
-  fi
-  as_test_x='
-    eval sh -c '\''
-      if test -d "$1"; then
-	test -d "$1/.";
-      else
-	case $1 in #(
-	-*)set "./$1";;
-	esac;
-	case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #((
-	???[sx]*):;;*)false;;esac;fi
-    '\'' sh
-  '
-fi
-as_executable_p=$as_test_x
-
-# Sed expression to map a string onto a valid CPP name.
-as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
-
-# Sed expression to map a string onto a valid variable name.
-as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
-
-
-exec 6>&1
-## ----------------------------------- ##
-## Main body of $CONFIG_STATUS script. ##
-## ----------------------------------- ##
-_ASEOF
-test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-# Save the log message, to keep $0 and so on meaningful, and to
-# report actual input values of CONFIG_FILES etc. instead of their
-# values after options handling.
-ac_log="
-This file was extended by runAs $as_me 0.1, which was
-generated by GNU Autoconf 2.65.  Invocation command line was
-
-  CONFIG_FILES    = $CONFIG_FILES
-  CONFIG_HEADERS  = $CONFIG_HEADERS
-  CONFIG_LINKS    = $CONFIG_LINKS
-  CONFIG_COMMANDS = $CONFIG_COMMANDS
-  $ $0 $@
-
-on `(hostname || uname -n) 2>/dev/null | sed 1q`
-"
-
-_ACEOF
-
-case $ac_config_files in *"
-"*) set x $ac_config_files; shift; ac_config_files=$*;;
-esac
-
-case $ac_config_headers in *"
-"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
-esac
-
-
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-# Files that config.status was made for.
-config_files="$ac_config_files"
-config_headers="$ac_config_headers"
-
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-ac_cs_usage="\
-\`$as_me' instantiates files and other configuration actions
-from templates according to the current configuration.  Unless the files
-and actions are specified as TAGs, all are instantiated by default.
-
-Usage: $0 [OPTION]... [TAG]...
-
-  -h, --help       print this help, then exit
-  -V, --version    print version number and configuration settings, then exit
-      --config     print configuration, then exit
-  -q, --quiet, --silent
-                   do not print progress messages
-  -d, --debug      don't remove temporary files
-      --recheck    update $as_me by reconfiguring in the same conditions
-      --file=FILE[:TEMPLATE]
-                   instantiate the configuration file FILE
-      --header=FILE[:TEMPLATE]
-                   instantiate the configuration header FILE
-
-Configuration files:
-$config_files
-
-Configuration headers:
-$config_headers
-
-Report bugs to the package provider."
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
-ac_cs_version="\\
-runAs config.status 0.1
-configured by $0, generated by GNU Autoconf 2.65,
-  with options \\"\$ac_cs_config\\"
-
-Copyright (C) 2009 Free Software Foundation, Inc.
-This config.status script is free software; the Free Software Foundation
-gives unlimited permission to copy, distribute and modify it."
-
-ac_pwd='$ac_pwd'
-srcdir='$srcdir'
-test -n "\$AWK" || AWK=awk
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-# The default lists apply if the user does not specify any file.
-ac_need_defaults=:
-while test $# != 0
-do
-  case $1 in
-  --*=*)
-    ac_option=`expr "X$1" : 'X\([^=]*\)='`
-    ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
-    ac_shift=:
-    ;;
-  *)
-    ac_option=$1
-    ac_optarg=$2
-    ac_shift=shift
-    ;;
-  esac
-
-  case $ac_option in
-  # Handling of the options.
-  -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
-    ac_cs_recheck=: ;;
-  --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
-    $as_echo "$ac_cs_version"; exit ;;
-  --config | --confi | --conf | --con | --co | --c )
-    $as_echo "$ac_cs_config"; exit ;;
-  --debug | --debu | --deb | --de | --d | -d )
-    debug=: ;;
-  --file | --fil | --fi | --f )
-    $ac_shift
-    case $ac_optarg in
-    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
-    esac
-    as_fn_append CONFIG_FILES " '$ac_optarg'"
-    ac_need_defaults=false;;
-  --header | --heade | --head | --hea )
-    $ac_shift
-    case $ac_optarg in
-    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
-    esac
-    as_fn_append CONFIG_HEADERS " '$ac_optarg'"
-    ac_need_defaults=false;;
-  --he | --h)
-    # Conflict between --help and --header
-    as_fn_error "ambiguous option: \`$1'
-Try \`$0 --help' for more information.";;
-  --help | --hel | -h )
-    $as_echo "$ac_cs_usage"; exit ;;
-  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
-  | -silent | --silent | --silen | --sile | --sil | --si | --s)
-    ac_cs_silent=: ;;
-
-  # This is an error.
-  -*) as_fn_error "unrecognized option: \`$1'
-Try \`$0 --help' for more information." ;;
-
-  *) as_fn_append ac_config_targets " $1"
-     ac_need_defaults=false ;;
-
-  esac
-  shift
-done
-
-ac_configure_extra_args=
-
-if $ac_cs_silent; then
-  exec 6>/dev/null
-  ac_configure_extra_args="$ac_configure_extra_args --silent"
-fi
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-if \$ac_cs_recheck; then
-  set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
-  shift
-  \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
-  CONFIG_SHELL='$SHELL'
-  export CONFIG_SHELL
-  exec "\$@"
-fi
-
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-exec 5>>config.log
-{
-  echo
-  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
-## Running $as_me. ##
-_ASBOX
-  $as_echo "$ac_log"
-} >&5
-
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-
-# Handling of arguments.
-for ac_config_target in $ac_config_targets
-do
-  case $ac_config_target in
-    "runAs.h") CONFIG_HEADERS="$CONFIG_HEADERS runAs.h" ;;
-    "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
-
-  *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
-  esac
-done
-
-
-# If the user did not use the arguments to specify the items to instantiate,
-# then the envvar interface is used.  Set only those that are not.
-# We use the long form for the default assignment because of an extremely
-# bizarre bug on SunOS 4.1.3.
-if $ac_need_defaults; then
-  test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
-  test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
-fi
-
-# Have a temporary directory for convenience.  Make it in the build tree
-# simply because there is no reason against having it here, and in addition,
-# creating and moving files from /tmp can sometimes cause problems.
-# Hook for its removal unless debugging.
-# Note that there is a small window in which the directory will not be cleaned:
-# after its creation but before its name has been assigned to `$tmp'.
-$debug ||
-{
-  tmp=
-  trap 'exit_status=$?
-  { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status
-' 0
-  trap 'as_fn_exit 1' 1 2 13 15
-}
-# Create a (secure) tmp directory for tmp files.
-
-{
-  tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
-  test -n "$tmp" && test -d "$tmp"
-}  ||
-{
-  tmp=./conf$$-$RANDOM
-  (umask 077 && mkdir "$tmp")
-} || as_fn_error "cannot create a temporary directory in ." "$LINENO" 5
-
-# Set up the scripts for CONFIG_FILES section.
-# No need to generate them if there are no CONFIG_FILES.
-# This happens for instance with `./config.status config.h'.
-if test -n "$CONFIG_FILES"; then
-
-
-ac_cr=`echo X | tr X '\015'`
-# On cygwin, bash can eat \r inside `` if the user requested igncr.
-# But we know of no other shell where ac_cr would be empty at this
-# point, so we can use a bashism as a fallback.
-if test "x$ac_cr" = x; then
-  eval ac_cr=\$\'\\r\'
-fi
-ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' </dev/null 2>/dev/null`
-if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
-  ac_cs_awk_cr='\r'
-else
-  ac_cs_awk_cr=$ac_cr
-fi
-
-echo 'BEGIN {' >"$tmp/subs1.awk" &&
-_ACEOF
-
-
-{
-  echo "cat >conf$$subs.awk <<_ACEOF" &&
-  echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
-  echo "_ACEOF"
-} >conf$$subs.sh ||
-  as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
-ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'`
-ac_delim='%!_!# '
-for ac_last_try in false false false false false :; do
-  . ./conf$$subs.sh ||
-    as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
-
-  ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
-  if test $ac_delim_n = $ac_delim_num; then
-    break
-  elif $ac_last_try; then
-    as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5
-  else
-    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
-  fi
-done
-rm -f conf$$subs.sh
-
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-cat >>"\$tmp/subs1.awk" <<\\_ACAWK &&
-_ACEOF
-sed -n '
-h
-s/^/S["/; s/!.*/"]=/
-p
-g
-s/^[^!]*!//
-:repl
-t repl
-s/'"$ac_delim"'$//
-t delim
-:nl
-h
-s/\(.\{148\}\)..*/\1/
-t more1
-s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
-p
-n
-b repl
-:more1
-s/["\\]/\\&/g; s/^/"/; s/$/"\\/
-p
-g
-s/.\{148\}//
-t nl
-:delim
-h
-s/\(.\{148\}\)..*/\1/
-t more2
-s/["\\]/\\&/g; s/^/"/; s/$/"/
-p
-b
-:more2
-s/["\\]/\\&/g; s/^/"/; s/$/"\\/
-p
-g
-s/.\{148\}//
-t delim
-' <conf$$subs.awk | sed '
-/^[^""]/{
-  N
-  s/\n//
-}
-' >>$CONFIG_STATUS || ac_write_fail=1
-rm -f conf$$subs.awk
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-_ACAWK
-cat >>"\$tmp/subs1.awk" <<_ACAWK &&
-  for (key in S) S_is_set[key] = 1
-  FS = ""
-
-}
-{
-  line = $ 0
-  nfields = split(line, field, "@")
-  substed = 0
-  len = length(field[1])
-  for (i = 2; i < nfields; i++) {
-    key = field[i]
-    keylen = length(key)
-    if (S_is_set[key]) {
-      value = S[key]
-      line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
-      len += length(value) + length(field[++i])
-      substed = 1
-    } else
-      len += 1 + keylen
-  }
-
-  print line
-}
-
-_ACAWK
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
-  sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
-else
-  cat
-fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \
-  || as_fn_error "could not setup config files machinery" "$LINENO" 5
-_ACEOF
-
-# VPATH may cause trouble with some makes, so we remove $(srcdir),
-# ${srcdir} and @srcdir@ from VPATH if srcdir is ".", strip leading and
-# trailing colons and then remove the whole line if VPATH becomes empty
-# (actually we leave an empty line to preserve line numbers).
-if test "x$srcdir" = x.; then
-  ac_vpsub='/^[	 ]*VPATH[	 ]*=/{
-s/:*\$(srcdir):*/:/
-s/:*\${srcdir}:*/:/
-s/:*@srcdir@:*/:/
-s/^\([^=]*=[	 ]*\):*/\1/
-s/:*$//
-s/^[^=]*=[	 ]*$//
-}'
-fi
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-fi # test -n "$CONFIG_FILES"
-
-# Set up the scripts for CONFIG_HEADERS section.
-# No need to generate them if there are no CONFIG_HEADERS.
-# This happens for instance with `./config.status Makefile'.
-if test -n "$CONFIG_HEADERS"; then
-cat >"$tmp/defines.awk" <<\_ACAWK ||
-BEGIN {
-_ACEOF
-
-# Transform confdefs.h into an awk script `defines.awk', embedded as
-# here-document in config.status, that substitutes the proper values into
-# config.h.in to produce config.h.
-
-# Create a delimiter string that does not exist in confdefs.h, to ease
-# handling of long lines.
-ac_delim='%!_!# '
-for ac_last_try in false false :; do
-  ac_t=`sed -n "/$ac_delim/p" confdefs.h`
-  if test -z "$ac_t"; then
-    break
-  elif $ac_last_try; then
-    as_fn_error "could not make $CONFIG_HEADERS" "$LINENO" 5
-  else
-    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
-  fi
-done
-
-# For the awk script, D is an array of macro values keyed by name,
-# likewise P contains macro parameters if any.  Preserve backslash
-# newline sequences.
-
-ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
-sed -n '
-s/.\{148\}/&'"$ac_delim"'/g
-t rset
-:rset
-s/^[	 ]*#[	 ]*define[	 ][	 ]*/ /
-t def
-d
-:def
-s/\\$//
-t bsnl
-s/["\\]/\\&/g
-s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
-D["\1"]=" \3"/p
-s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2"/p
-d
-:bsnl
-s/["\\]/\\&/g
-s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
-D["\1"]=" \3\\\\\\n"\\/p
-t cont
-s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
-t cont
-d
-:cont
-n
-s/.\{148\}/&'"$ac_delim"'/g
-t clear
-:clear
-s/\\$//
-t bsnlc
-s/["\\]/\\&/g; s/^/"/; s/$/"/p
-d
-:bsnlc
-s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
-b cont
-' <confdefs.h | sed '
-s/'"$ac_delim"'/"\\\
-"/g' >>$CONFIG_STATUS || ac_write_fail=1
-
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-  for (key in D) D_is_set[key] = 1
-  FS = ""
-}
-/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
-  line = \$ 0
-  split(line, arg, " ")
-  if (arg[1] == "#") {
-    defundef = arg[2]
-    mac1 = arg[3]
-  } else {
-    defundef = substr(arg[1], 2)
-    mac1 = arg[2]
-  }
-  split(mac1, mac2, "(") #)
-  macro = mac2[1]
-  prefix = substr(line, 1, index(line, defundef) - 1)
-  if (D_is_set[macro]) {
-    # Preserve the white space surrounding the "#".
-    print prefix "define", macro P[macro] D[macro]
-    next
-  } else {
-    # Replace #undef with comments.  This is necessary, for example,
-    # in the case of _POSIX_SOURCE, which is predefined and required
-    # on some systems where configure will not decide to define it.
-    if (defundef == "undef") {
-      print "/*", prefix defundef, macro, "*/"
-      next
-    }
-  }
-}
-{ print }
-_ACAWK
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-  as_fn_error "could not setup config headers machinery" "$LINENO" 5
-fi # test -n "$CONFIG_HEADERS"
-
-
-eval set X "  :F $CONFIG_FILES  :H $CONFIG_HEADERS    "
-shift
-for ac_tag
-do
-  case $ac_tag in
-  :[FHLC]) ac_mode=$ac_tag; continue;;
-  esac
-  case $ac_mode$ac_tag in
-  :[FHL]*:*);;
-  :L* | :C*:*) as_fn_error "invalid tag \`$ac_tag'" "$LINENO" 5;;
-  :[FH]-) ac_tag=-:-;;
-  :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
-  esac
-  ac_save_IFS=$IFS
-  IFS=:
-  set x $ac_tag
-  IFS=$ac_save_IFS
-  shift
-  ac_file=$1
-  shift
-
-  case $ac_mode in
-  :L) ac_source=$1;;
-  :[FH])
-    ac_file_inputs=
-    for ac_f
-    do
-      case $ac_f in
-      -) ac_f="$tmp/stdin";;
-      *) # Look for the file first in the build tree, then in the source tree
-	 # (if the path is not absolute).  The absolute path cannot be DOS-style,
-	 # because $ac_f cannot contain `:'.
-	 test -f "$ac_f" ||
-	   case $ac_f in
-	   [\\/$]*) false;;
-	   *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
-	   esac ||
-	   as_fn_error "cannot find input file: \`$ac_f'" "$LINENO" 5;;
-      esac
-      case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
-      as_fn_append ac_file_inputs " '$ac_f'"
-    done
-
-    # Let's still pretend it is `configure' which instantiates (i.e., don't
-    # use $as_me), people would be surprised to read:
-    #    /* config.h.  Generated by config.status.  */
-    configure_input='Generated from '`
-	  $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
-	`' by configure.'
-    if test x"$ac_file" != x-; then
-      configure_input="$ac_file.  $configure_input"
-      { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
-$as_echo "$as_me: creating $ac_file" >&6;}
-    fi
-    # Neutralize special characters interpreted by sed in replacement strings.
-    case $configure_input in #(
-    *\&* | *\|* | *\\* )
-       ac_sed_conf_input=`$as_echo "$configure_input" |
-       sed 's/[\\\\&|]/\\\\&/g'`;; #(
-    *) ac_sed_conf_input=$configure_input;;
-    esac
-
-    case $ac_tag in
-    *:-:* | *:-) cat >"$tmp/stdin" \
-      || as_fn_error "could not create $ac_file" "$LINENO" 5 ;;
-    esac
-    ;;
-  esac
-
-  ac_dir=`$as_dirname -- "$ac_file" ||
-$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
-	 X"$ac_file" : 'X\(//\)[^/]' \| \
-	 X"$ac_file" : 'X\(//\)$' \| \
-	 X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
-$as_echo X"$ac_file" |
-    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)[^/].*/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\/\)$/{
-	    s//\1/
-	    q
-	  }
-	  /^X\(\/\).*/{
-	    s//\1/
-	    q
-	  }
-	  s/.*/./; q'`
-  as_dir="$ac_dir"; as_fn_mkdir_p
-  ac_builddir=.
-
-case "$ac_dir" in
-.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
-*)
-  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
-  # A ".." for each directory in $ac_dir_suffix.
-  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
-  case $ac_top_builddir_sub in
-  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
-  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
-  esac ;;
-esac
-ac_abs_top_builddir=$ac_pwd
-ac_abs_builddir=$ac_pwd$ac_dir_suffix
-# for backward compatibility:
-ac_top_builddir=$ac_top_build_prefix
-
-case $srcdir in
-  .)  # We are building in place.
-    ac_srcdir=.
-    ac_top_srcdir=$ac_top_builddir_sub
-    ac_abs_top_srcdir=$ac_pwd ;;
-  [\\/]* | ?:[\\/]* )  # Absolute name.
-    ac_srcdir=$srcdir$ac_dir_suffix;
-    ac_top_srcdir=$srcdir
-    ac_abs_top_srcdir=$srcdir ;;
-  *) # Relative name.
-    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
-    ac_top_srcdir=$ac_top_build_prefix$srcdir
-    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
-esac
-ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
-
-
-  case $ac_mode in
-  :F)
-  #
-  # CONFIG_FILE
-  #
-
-_ACEOF
-
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-# If the template does not know about datarootdir, expand it.
-# FIXME: This hack should be removed a few years after 2.60.
-ac_datarootdir_hack=; ac_datarootdir_seen=
-ac_sed_dataroot='
-/datarootdir/ {
-  p
-  q
-}
-/@datadir@/p
-/@docdir@/p
-/@infodir@/p
-/@localedir@/p
-/@mandir@/p'
-case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
-*datarootdir*) ac_datarootdir_seen=yes;;
-*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
-$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
-_ACEOF
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-  ac_datarootdir_hack='
-  s&@datadir@&$datadir&g
-  s&@docdir@&$docdir&g
-  s&@infodir@&$infodir&g
-  s&@localedir@&$localedir&g
-  s&@mandir@&$mandir&g
-  s&\\\${datarootdir}&$datarootdir&g' ;;
-esac
-_ACEOF
-
-# Neutralize VPATH when `$srcdir' = `.'.
-# Shell code in configure.ac might set extrasub.
-# FIXME: do we really want to maintain this feature?
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
-ac_sed_extra="$ac_vpsub
-$extrasub
-_ACEOF
-cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
-:t
-/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
-s|@configure_input@|$ac_sed_conf_input|;t t
-s&@top_builddir@&$ac_top_builddir_sub&;t t
-s&@top_build_prefix@&$ac_top_build_prefix&;t t
-s&@srcdir@&$ac_srcdir&;t t
-s&@abs_srcdir@&$ac_abs_srcdir&;t t
-s&@top_srcdir@&$ac_top_srcdir&;t t
-s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
-s&@builddir@&$ac_builddir&;t t
-s&@abs_builddir@&$ac_abs_builddir&;t t
-s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
-$ac_datarootdir_hack
-"
-eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \
-  || as_fn_error "could not create $ac_file" "$LINENO" 5
-
-test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
-  { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } &&
-  { ac_out=`sed -n '/^[	 ]*datarootdir[	 ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } &&
-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined.  Please make sure it is defined." >&5
-$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
-which seems to be undefined.  Please make sure it is defined." >&2;}
-
-  rm -f "$tmp/stdin"
-  case $ac_file in
-  -) cat "$tmp/out" && rm -f "$tmp/out";;
-  *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";;
-  esac \
-  || as_fn_error "could not create $ac_file" "$LINENO" 5
- ;;
-  :H)
-  #
-  # CONFIG_HEADER
-  #
-  if test x"$ac_file" != x-; then
-    {
-      $as_echo "/* $configure_input  */" \
-      && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs"
-    } >"$tmp/config.h" \
-      || as_fn_error "could not create $ac_file" "$LINENO" 5
-    if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then
-      { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
-$as_echo "$as_me: $ac_file is unchanged" >&6;}
-    else
-      rm -f "$ac_file"
-      mv "$tmp/config.h" "$ac_file" \
-	|| as_fn_error "could not create $ac_file" "$LINENO" 5
-    fi
-  else
-    $as_echo "/* $configure_input  */" \
-      && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \
-      || as_fn_error "could not create -" "$LINENO" 5
-  fi
- ;;
-
-
-  esac
-
-done # for ac_tag
-
-
-as_fn_exit 0
-_ACEOF
-ac_clean_files=$ac_clean_files_save
-
-test $ac_write_fail = 0 ||
-  as_fn_error "write failure creating $CONFIG_STATUS" "$LINENO" 5
-
-
-# configure is writing to config.log, and then calls config.status.
-# config.status does its own redirection, appending to config.log.
-# Unfortunately, on DOS this fails, as config.log is still kept open
-# by configure, so config.status won't be able to write to it; its
-# output is simply discarded.  So we exec the FD to /dev/null,
-# effectively closing config.log, so it can be properly (re)opened and
-# appended to by config.status.  When coming back to configure, we
-# need to make the FD available again.
-if test "$no_create" != yes; then
-  ac_cs_success=:
-  ac_config_status_args=
-  test "$silent" = yes &&
-    ac_config_status_args="$ac_config_status_args --quiet"
-  exec 5>/dev/null
-  $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
-  exec 5>>config.log
-  # Use ||, not &&, to avoid exiting from the if with $? = 1, which
-  # would make configure fail if this is the last instruction.
-  $ac_cs_success || as_fn_exit $?
-fi
-if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
-$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
-fi
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for stdbool.h that conforms to C99" >&5
-$as_echo_n "checking for stdbool.h that conforms to C99... " >&6; }
-if test "${ac_cv_header_stdbool_h+set}" = set; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
-/* end confdefs.h.  */
-
-#include <stdbool.h>
-#ifndef bool
- "error: bool is not defined"
-#endif
-#ifndef false
- "error: false is not defined"
-#endif
-#if false
- "error: false is not 0"
-#endif
-#ifndef true
- "error: true is not defined"
-#endif
-#if true != 1
- "error: true is not 1"
-#endif
-#ifndef __bool_true_false_are_defined
- "error: __bool_true_false_are_defined is not defined"
-#endif
-
-	struct s { _Bool s: 1; _Bool t; } s;
-
-	char a[true == 1 ? 1 : -1];
-	char b[false == 0 ? 1 : -1];
-	char c[__bool_true_false_are_defined == 1 ? 1 : -1];
-	char d[(bool) 0.5 == true ? 1 : -1];
-	bool e = &s;
-	char f[(_Bool) 0.0 == false ? 1 : -1];
-	char g[true];
-	char h[sizeof (_Bool)];
-	char i[sizeof s.t];
-	enum { j = false, k = true, l = false * true, m = true * 256 };
-	/* The following fails for
-	   HP aC++/ANSI C B3910B A.05.55 [Dec 04 2003]. */
-	_Bool n[m];
-	char o[sizeof n == m * sizeof n[0] ? 1 : -1];
-	char p[-1 - (_Bool) 0 < 0 && -1 - (bool) 0 < 0 ? 1 : -1];
-#	if defined __xlc__ || defined __GNUC__
-	 /* Catch a bug in IBM AIX xlc compiler version 6.0.0.0
-	    reported by James Lemley on 2005-10-05; see
-	    http://lists.gnu.org/archive/html/bug-coreutils/2005-10/msg00086.html
-	    This test is not quite right, since xlc is allowed to
-	    reject this program, as the initializer for xlcbug is
-	    not one of the forms that C requires support for.
-	    However, doing the test right would require a runtime
-	    test, and that would make cross-compilation harder.
-	    Let us hope that IBM fixes the xlc bug, and also adds
-	    support for this kind of constant expression.  In the
-	    meantime, this test will reject xlc, which is OK, since
-	    our stdbool.h substitute should suffice.  We also test
-	    this with GCC, where it should work, to detect more
-	    quickly whether someone messes up the test in the
-	    future.  */
-	 char digs[] = "0123456789";
-	 int xlcbug = 1 / (&(digs + 5)[-2 + (bool) 1] == &digs[4] ? 1 : -1);
-#	endif
-	/* Catch a bug in an HP-UX C compiler.  See
-	   http://gcc.gnu.org/ml/gcc-patches/2003-12/msg02303.html
-	   http://lists.gnu.org/archive/html/bug-coreutils/2005-11/msg00161.html
-	 */
-	_Bool q = true;
-	_Bool *pq = &q;
-
-int
-main ()
-{
-
-	*pq |= q;
-	*pq |= ! q;
-	/* Refer to every declared value, to avoid compiler optimizations.  */
-	return (!a + !b + !c + !d + !e + !f + !g + !h + !i + !!j + !k + !!l
-		+ !m + !n + !o + !p + !q + !pq);
-
-  ;
-  return 0;
-}
-_ACEOF
-if ac_fn_c_try_compile "$LINENO"; then :
-  ac_cv_header_stdbool_h=yes
-else
-  ac_cv_header_stdbool_h=no
-fi
-rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdbool_h" >&5
-$as_echo "$ac_cv_header_stdbool_h" >&6; }
-ac_fn_c_check_type "$LINENO" "_Bool" "ac_cv_type__Bool" "$ac_includes_default"
-if test "x$ac_cv_type__Bool" = x""yes; then :
-
-cat >>confdefs.h <<_ACEOF
-#define HAVE__BOOL 1
-_ACEOF
-
-
-fi
-
-if test $ac_cv_header_stdbool_h = yes; then
-
-$as_echo "#define HAVE_STDBOOL_H 1" >>confdefs.h
-
-fi
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
-$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
-set x ${MAKE-make}
-ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
-if { as_var=ac_cv_prog_make_${ac_make}_set; eval "test \"\${$as_var+set}\" = set"; }; then :
-  $as_echo_n "(cached) " >&6
-else
-  cat >conftest.make <<\_ACEOF
-SHELL = /bin/sh
-all:
-	@echo '@@@%%%=$(MAKE)=@@@%%%'
-_ACEOF
-# GNU make sometimes prints "make[1]: Entering...", which would confuse us.
-case `${MAKE-make} -f conftest.make 2>/dev/null` in
-  *@@@%%%=?*=@@@%%%*)
-    eval ac_cv_prog_make_${ac_make}_set=yes;;
-  *)
-    eval ac_cv_prog_make_${ac_make}_set=no;;
-esac
-rm -f conftest.make
-fi
-if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-  SET_MAKE=
-else
-  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-  SET_MAKE="MAKE=${MAKE-make}"
-fi
-

+ 0 - 65
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/configure.ac

@@ -1,65 +0,0 @@
-#                                               -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-AC_PREREQ(2.59)
-AC_INIT([runAs],[0.1])
-
-# Change the default prefix to the current directory so that the binary
-# does not get installed system-wide.
-AC_PREFIX_DEFAULT(.)
-
-# Add a new argument: --with-home
-AC_ARG_WITH(home,[--with-home path to the Hadoop home directory])
-AC_CONFIG_SRCDIR([main.c])
-AC_CONFIG_HEADER([runAs.h])
-
-# Checks for programs.
-AC_PROG_CC
-
-# Checks for libraries.
-
-# Checks for header files.
-AC_HEADER_STDC
-AC_CHECK_HEADERS([stdlib.h string.h unistd.h fcntl.h])
-
-# Check for HADOOP_PREFIX
-if test "$with_home" != ""
-then
-AC_DEFINE_UNQUOTED(HADOOP_PREFIX,"$with_home")
-fi
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_C_CONST
-AC_TYPE_PID_T
-AC_TYPE_MODE_T
-AC_TYPE_SIZE_T
-
-# Checks for library functions.
-AC_FUNC_MALLOC
-AC_FUNC_REALLOC
-AC_FUNC_CHOWN
-AC_CHECK_FUNCS([strerror memset mkdir rmdir strdup])
-
-AC_HEADER_STDBOOL
-AC_PROG_MAKE_SET
-
-AC_CONFIG_FILES([Makefile])
-AC_OUTPUT

+ 0 - 59
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/main.c

@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runAs.h"
-
-/**
- * The binary accepts a command in the following format:
- * cluster-controller user hostname hadoop-daemon.sh-command
- */
-int main(int argc, char **argv) {
-  char *user;
-  char *hostname;
-  char *command;
-  struct passwd user_detail;
-  /*
-   * Check for the minimum number of arguments required by the binary.
-   */
-  if (argc < 4) {
-    fprintf(stderr, "Invalid number of arguments passed to the binary\n");
-    return INVALID_ARGUMENT_NUMBER;
-  }
-
-  user = argv[1];
-  if (user == NULL) {
-    fprintf(stderr, "Invalid user name\n");
-    return INVALID_USER_NAME;
-  }
-
-  if (getuserdetail(user, &user_detail) != 0) {
-    fprintf(stderr, "Invalid user name\n");
-    return INVALID_USER_NAME;
-  }
-
-  if (user_detail.pw_gid == 0 || user_detail.pw_uid == 0) {
-      fprintf(stderr, "Cannot run tasks as super user\n");
-      return SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS;
-  }
-
-  hostname = argv[2];
-  command = argv[3];
-  return process_controller_command(user, hostname, command);
-}
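
For context, the Java side of Herriot (the multi-user remote cluster manager removed in this same commit) presumably shells out to this wrapper with the argument order main() expects. A minimal sketch of such an invocation; the binary path, user, host, and command below are illustrative assumptions, not values taken from the removed framework:

import java.io.IOException;

// Hedged sketch: drives the runAs wrapper from Java.
public class RunAsInvoker {
  public static int runAs(String binary, String user, String host, String command)
      throws IOException, InterruptedException {
    // Argument order matches main(): user, hostname, hadoop-daemon.sh command.
    Process p = new ProcessBuilder(binary, user, host, command)
        .inheritIO()
        .start();
    return p.waitFor();  // non-zero maps to an errorcodes value from runAs.h.in
  }

  public static void main(String[] args) throws Exception {
    int rc = runAs("/opt/runAs/runAs", "hdfs", "dn1.example.com", "start datanode");
    System.out.println("runAs exit code: " + rc);
  }
}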

+ 0 - 111
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.c

@@ -1,111 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "runAs.h"
-
-/*
- * Populates the user details for a given user name.
- */
-int getuserdetail(char *user, struct passwd *user_detail) {
-  struct passwd *tempPwdPtr;
-  int size = sysconf(_SC_GETPW_R_SIZE_MAX);
-  char pwdbuffer[size];
-  if (getpwnam_r(user, user_detail, pwdbuffer, size, &tempPwdPtr) != 0
-      || tempPwdPtr == NULL) {
-    //getpwnam_r returns 0 with a NULL result when the user is unknown.
-    fprintf(stderr, "Invalid user provided to getpwnam\n");
-    return -1;
-  }
-  return 0;
-}
-
-/**
- * Function to switch the user identity and set the appropriate 
- * group control as the user specified in the argument.
- */
-int switchuser(char *user) {
-  //populate the user details
-  struct passwd user_detail;
-  if ((getuserdetail(user, &user_detail)) != 0) {
-    return INVALID_USER_NAME;
-  }
-  //set the right supplementary groups for the user.
-  if (initgroups(user_detail.pw_name, user_detail.pw_gid) != 0) {
-    fprintf(stderr, "Init groups call for the user : %s failed\n",
-        user_detail.pw_name);
-    return INITGROUPS_FAILED;
-  }
-  //switch the group.
-  if (setgid(user_detail.pw_gid) != 0) {
-    fprintf(stderr, "Setgid for the user : %s failed\n", user_detail.pw_name);
-    return SETUID_OPER_FAILED;
-  }
-  //switch the user.
-  if (setuid(user_detail.pw_uid) != 0) {
-    fprintf(stderr, "Setuid for the user : %s failed\n", user_detail.pw_name);
-    return SETUID_OPER_FAILED;
-  }
-  //set the effective user id.
-  if (seteuid(user_detail.pw_uid) != 0) {
-    fprintf(stderr, "Seteuid for the user : %s failed\n", user_detail.pw_name);
-    return SETUID_OPER_FAILED;
-  }
-  return 0;
-}
-
-/*
- * Top level method which processes a cluster management
- * command.
- */
-int process_cluster_command(char * user,  char * node , char *command) {
-  char *finalcommandstr;
-  int len;
-  int errorcode = 0;
-  if (command == NULL || *command == '\0') {
-    fprintf(stderr, "Invalid command passed\n");
-    return INVALID_COMMAND_PASSED;
-  }
-  len = STRLEN + strlen(command);
-  finalcommandstr = (char *) malloc((len + 1) * sizeof(char));
-  //snprintf bounds the write and NUL-terminates the result.
-  snprintf(finalcommandstr, len + 1, SCRIPT_DIR_PATTERN, HADOOP_PREFIX,
-      command);
-  errorcode = switchuser(user);
-  if (errorcode != 0) {
-    fprintf(stderr, "switch user failed\n");
-    return errorcode;
-  }
-  //execlp only returns on failure.
-  execlp(SSH_COMMAND, SSH_COMMAND, node, finalcommandstr, NULL);
-  fprintf(stderr, "execlp failed due to: %s\n", strerror(errno));
-  return -1;
-}
-
-/*
- * Processes a cluster controller command; this is the API exposed to
- * main for executing cluster commands.
- */
-int process_controller_command(char *user, char * node, char *command) {
-  return process_cluster_command(user, node, command);
-}
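
After switching identity, process_cluster_command() formats SCRIPT_DIR_PATTERN with HADOOP_PREFIX and the requested command, then execs ssh to the target node. A rough Java equivalent of that final invocation, with an assumed prefix, host, and command for illustration:

import java.io.IOException;

// Hedged sketch of the command runAs.c ultimately execs:
// ssh <node> "<HADOOP_PREFIX>/bin/hadoop-daemon.sh <command>".
public class DaemonSshSketch {
  public static void main(String[] args) throws IOException, InterruptedException {
    String hadoopPrefix = "/opt/hadoop";        // assumed --with-home value
    String node = "dn1.example.com";            // assumed target host
    String remote = String.format("%s/bin/hadoop-daemon.sh %s",
        hadoopPrefix, "start datanode");
    Process ssh = new ProcessBuilder("ssh", node, remote).inheritIO().start();
    System.exit(ssh.waitFor());
  }
}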

+ 0 - 59
hadoop-common-project/hadoop-common/src/test/system/c++/runAs/runAs.h.in

@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <errno.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <pwd.h>
-#include <assert.h>
-#include <getopt.h>
-#include <grp.h>
-
-/*
-* List of possible error codes.
-*/
-enum errorcodes {
-  INVALID_ARGUMENT_NUMBER = 1,
-  INVALID_USER_NAME, //2
-  SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS, //3
-  INITGROUPS_FAILED, //4
-  SETUID_OPER_FAILED, //5
-  INVALID_COMMAND_PASSED, //6
-};
-
-#undef HADOOP_PREFIX
-
-#define SSH_COMMAND "ssh"
-
-#define SCRIPT_DIR_PATTERN "%s/bin/hadoop-daemon.sh %s" //%s to be substituted
-
-#define STRLEN (strlen(SCRIPT_DIR_PATTERN) + strlen(HADOOP_PREFIX))
-
-/*
- * Populates the user details for a given user name.
- */
-int getuserdetail(char *user, struct passwd *user_detail);
-
-/*
- * Processes a cluster controller command; this is the API exposed to
- * main for executing cluster commands.
- */
-int process_controller_command(char *user, char *node, char *command);
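
Since the wrapper reports failures only through its exit status, a caller has to map the errorcodes values back to meanings. A hedged Java-side mirror of the enum above; the class name and decode() helper are illustrative, not part of Herriot:

// Mirrors the errorcodes enum from runAs.h.in. Exit code 0 means the exec
// succeeded and the remote hadoop-daemon.sh command exited cleanly; codes
// outside this table originate from ssh or the remote script itself.
public enum RunAsError {
  OK(0),
  INVALID_ARGUMENT_NUMBER(1),
  INVALID_USER_NAME(2),
  SUPER_USER_NOT_ALLOWED_TO_RUN_COMMANDS(3),
  INITGROUPS_FAILED(4),
  SETUID_OPER_FAILED(5),
  INVALID_COMMAND_PASSED(6);

  private final int code;
  RunAsError(int code) { this.code = code; }

  public static RunAsError decode(int exitCode) {
    for (RunAsError e : values()) {
      if (e.code == exitCode) return e;
    }
    throw new IllegalArgumentException("Unknown runAs exit code: " + exitCode);
  }
}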

+ 0 - 68
hadoop-common-project/hadoop-common/src/test/system/conf/hadoop-policy-system-test.xml

@@ -1,68 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-<!--
-  These are Herriot-specific protocols. This section should not be present in
-  a production cluster configuration. This file needs to be linked to the
-  main conf/hadoop-policy.xml during deployment.
--->
-  <property>
-    <name>security.daemon.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DaemonProtocol, extended by all other
-    Herriot RPC protocols.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.nn.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NNProtocol, used by the
-    Herriot AbstractDaemonCluster's implementations to connect to a remote
-    NameNode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.dn.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DNProtocol, used by the
-    Herriot AbstractDaemonCluster's implementations to connect to a remote
-    DataNode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.tt.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TTProtocol, used by the
-    Herriot AbstractDaemonCluster's implementations to connect to a remote
-    TaskTracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For example, "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>
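
For illustration, this is how a test client might read one of these ACLs through Hadoop's Configuration API. The resource name is assumed to be on the classpath; this is a sketch, not code from the removed framework:

import org.apache.hadoop.conf.Configuration;

// Hedged sketch: reads the Herriot DaemonProtocol ACL from the policy file.
public class AclSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.addResource("hadoop-policy-system-test.xml");
    // "*" allows all users; otherwise "user1,user2 group1,group2".
    String acl = conf.get("security.daemon.protocol.acl", "*");
    System.out.println("DaemonProtocol ACL: " + acl);
  }
}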

+ 0 - 599
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonClient.java

@@ -1,599 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.IOException;
-import java.util.*;
-
-import org.junit.Assert;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-import javax.management.*;
-import javax.management.remote.JMXConnector;
-import javax.management.remote.JMXConnectorFactory;
-import javax.management.remote.JMXServiceURL;
-
-/**
- * Abstract class which encapsulates the daemon client used in the
- * system tests.<br/>
- *
- * @param <PROXY> the proxy implementation of a specific daemon
- */
-public abstract class AbstractDaemonClient<PROXY extends DaemonProtocol> {
-  private Configuration conf;
-  private Boolean jmxEnabled = null;
-  private MBeanServerConnection connection;
-  private int jmxPortNumber = -1;
-  private RemoteProcess process;
-  private boolean connected;
-
-  private static final Log LOG = LogFactory.getLog(AbstractDaemonClient.class);
-  private static final String HADOOP_JMX_DOMAIN = "Hadoop";
-  private static final String HADOOP_OPTS_ENV = "HADOOP_OPTS";
-
-  /**
-   * Create a Daemon client.<br/>
-   * 
-   * @param conf configuration used by the proxy to connect to the daemon.
-   * @param process the Daemon process to manage the particular daemon.
-   * 
-   * @throws IOException on RPC error
-   */
-  public AbstractDaemonClient(Configuration conf, RemoteProcess process) 
-      throws IOException {
-    this.conf = conf;
-    this.process = process;
-  }
-
-  /**
-   * Returns whether the client is connected to the daemon. <br/>
-   * 
-   * @return true if connected.
-   */
-  public boolean isConnected() {
-    return connected;
-  }
-
-  protected void setConnected(boolean connected) {
-    this.connected = connected;
-  }
-
-  /**
-   * Create an RPC proxy to the daemon <br/>
-   * 
-   * @throws IOException on RPC error
-   */
-  public abstract void connect() throws IOException;
-
-  /**
-   * Disconnect the underlying RPC proxy to the daemon.<br/>
-   * @throws IOException in case of communication errors
-   */
-  public abstract void disconnect() throws IOException;
-
-  /**
-   * Get the proxy to connect to a particular service Daemon.<br/>
-   * 
-   * @return proxy to connect to a particular service Daemon.
-   */
-  protected abstract PROXY getProxy();
-
-  /**
-   * Gets the daemon level configuration.<br/>
-   * 
-   * @return configuration using which daemon is running
-   */
-  public Configuration getConf() {
-    return conf;
-  }
-
-  /**
-   * Gets the host on which Daemon is currently running. <br/>
-   * 
-   * @return hostname
-   */
-  public String getHostName() {
-    return process.getHostName();
-  }
-
-  /**
-   * Returns whether the daemon is ready to accept RPC connections. <br/>
-   * 
-   * @return true if daemon is ready.
-   * @throws IOException on RPC error
-   */
-  public boolean isReady() throws IOException {
-    return getProxy().isReady();
-  }
-
-  /**
-   * Kills the Daemon process <br/>
-   * @throws IOException on RPC error
-   */
-  public void kill() throws IOException {
-    process.kill();
-  }
-
-  /**
-   * Checks whether the daemon process is alive. <br/>
-   * @throws IOException on RPC error
-   */
-  public void ping() throws IOException {
-    getProxy().ping();
-  }
-
-  /**
-   * Start up the Daemon process. <br/>
-   * @throws IOException on RPC error
-   */
-  public void start() throws IOException {
-    process.start();
-  }
-
-  /**
-   * Get system level view of the Daemon process.
-   * 
-   * @return system level view of the Daemon process.
-   * 
-   * @throws IOException on RPC error. 
-   */
-  public ProcessInfo getProcessInfo() throws IOException {
-    return getProxy().getProcessInfo();
-  }
-
-  /**
-   * Abstract method to retrieve the name of a daemon specific env. var
-   * @return name of the Hadoop environment variable containing the daemon options
-   */
-  public abstract String getHadoopOptsEnvName();
-
-  /**
-   * Checks remote daemon process info to see if certain JMX sys. properties
-   * are available, to determine whether the JMX service is enabled remotely.
-   *
-   * @return <code>true</code> if remote JMX is enabled
-   * @throws IOException is thrown in case of communication errors
-   */
-  public boolean isJmxEnabled() throws IOException {
-    return isJmxEnabled(HADOOP_OPTS_ENV) ||
-        isJmxEnabled(getHadoopOptsEnvName());
-  }
-
-  /**
-   * Checks remote daemon process info to see if certain JMX sys. properties
-   * are available, to determine whether the JMX service is enabled remotely.
-   *
-   * @param envivar name of an environment variable to be searched
-   * @return <code>true</code> if remote JMX is enabled
-   * @throws IOException is thrown in case of communication errors
-   */
-  protected boolean isJmxEnabled(String envivar) throws IOException {
-    if (jmxEnabled != null) return jmxEnabled;
-    boolean ret = false;
-    String jmxRemoteString = "-Dcom.sun.management.jmxremote";
-    String hadoopOpts = getProcessInfo().getEnv().get(envivar);
-    if (hadoopOpts == null) {
-      // The variable may simply be unset on the remote side.
-      return false;
-    }
-    LOG.debug("Looking into " + hadoopOpts + " from " + envivar);
-    List<String> options = Arrays.asList(hadoopOpts.split(" "));
-    ret = options.contains(jmxRemoteString);
-    jmxEnabled = ret;
-    return ret;
-  }
-
-  /**
-   * Checks remote daemon process info to find remote JMX server port number
-   * By default this method will look into "HADOOP_OPTS" variable only.
-   * @return port number of the remote JMX server, or -1 if it can't be found
-   * @throws IOException is thrown in case of communication errors
-   * @throws IllegalArgumentException if non-integer port is set
-   *  in the remote process info
-   */
-  public int getJmxPortNumber() throws IOException, IllegalArgumentException {
-    int portNo = getJmxPortNumber(HADOOP_OPTS_ENV);
-    return portNo != -1 ? portNo : getJmxPortNumber(getHadoopOptsEnvName());
-  }
-
-  /**
-   * Checks remote daemon process info to find remote JMX server port number
-   *
-   * @param envivar name of the env. var. to look for JMX specific settings
-   * @return port number of the remote JMX server, or -1 if it can't be found
-   * @throws IOException is thrown in case of communication errors
-   * @throws IllegalArgumentException if non-integer port is set
-   *  in the remote process info
-   */
-  protected int getJmxPortNumber(final String envivar) throws
-      IOException, IllegalArgumentException {
-    if (jmxPortNumber != -1) return jmxPortNumber;
-    String jmxPortString = "-Dcom.sun.management.jmxremote.port";
-
-    String hadoopOpts = getProcessInfo().getEnv().get(envivar);
-    if (hadoopOpts == null) {
-      // Unset variable: report "not found" so the caller can try another one.
-      return -1;
-    }
-    int portNumber = -1;
-    String[] options = hadoopOpts.split(" ");
-    for (String option : options) {
-      if (option.startsWith(jmxPortString)) {
-        try {
-          portNumber = Integer.parseInt(option.split("=")[1]);
-        } catch (NumberFormatException e) {
-          throw new IllegalArgumentException("JMX port number isn't an integer");
-        }
-        break;
-      }
-    }
-    // Returning -1 when the option is absent lets getJmxPortNumber() fall
-    // back to the daemon-specific variable, as the javadoc promises.
-    jmxPortNumber = portNumber;
-    return jmxPortNumber;
-  }
-
-  /**
-   * Return a file status object that represents the path.
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return a FileStatus object
-   * @throws IOException see specific implementation
-   */
-  public FileStatus getFileStatus(String path, boolean local) throws IOException {
-    return getProxy().getFileStatus(path, local);
-  }
-
-  /**
-   * Create a file with full permissions in a file system.
-   * @param path - source path where the file has to be created.
-   * @param fileName - file name
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFile(String path, String fileName, 
-      boolean local) throws IOException {
-    getProxy().createFile(path, fileName, null, local);
-  }
-
-  /**
-   * Create a file with given permissions in a file system.
-   * @param path - source path where the file has to be created.
-   * @param fileName - file name.
-   * @param permission - file permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFile(String path, String fileName, 
-     FsPermission permission,  boolean local) throws IOException {
-    getProxy().createFile(path, fileName, permission, local);
-  }
-
-  /**
-   * Create a folder with default permissions in a file system.
-   * @param path - source path where the folder has to be created.
-   * @param folderName - folder name.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs. 
-   */
-  public void createFolder(String path, String folderName, 
-     boolean local) throws IOException {
-    getProxy().createFolder(path, folderName, null, local);
-  }
-
-  /**
-   * Create a folder with given permissions in a file system.
-   * @param path - source path where the folder has to be created.
-   * @param folderName - folder name.
-   * @param permission - folder permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFolder(String path, String folderName, 
-     FsPermission permission,  boolean local) throws IOException {
-    getProxy().createFolder(path, folderName, permission, local);
-  }
-
-  /**
-   * List the statuses of the files/directories in the given path if the path is
-   * a directory.
-   * 
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return the statuses of the files/directories in the given path
-   * @throws IOException on RPC error. 
-   */
-  public FileStatus[] listStatus(String path, boolean local) 
-    throws IOException {
-    return getProxy().listStatus(path, local);
-  }
-
-  /**
-   * List the statuses of the files/directories in the given path if the path is
-   * a directory, recursively or non-recursively depending on the parameters.
-   * 
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @param recursive 
-   *          whether to recursively get the status
-   * @return the statuses of the files/directories in the given path
-   * @throws IOException is thrown on RPC error. 
-   */
-  public FileStatus[] listStatus(String path, boolean local, boolean recursive)
-    throws IOException {
-    List<FileStatus> status = new ArrayList<FileStatus>();
-    addStatus(status, path, local, recursive);
-    return status.toArray(new FileStatus[0]);
-  }
-
-  private void addStatus(List<FileStatus> status, String f, 
-      boolean local, boolean recursive) 
-    throws IOException {
-    FileStatus[] fs = listStatus(f, local);
-    if (fs != null) {
-      for (FileStatus fileStatus : fs) {
-        if (!f.equals(fileStatus.getPath().toString())) {
-          status.add(fileStatus);
-          if (recursive) {
-            addStatus(status, fileStatus.getPath().toString(), local, recursive);
-          }
-        }
-      }
-    }
-  }
-
-  /**
-   * Gets the number of times FATAL log messages were logged in the daemon logs.
-   * <br/>
-   * Pattern used for searching is FATAL. <br/>
-   * @param excludeExpList list of exceptions to exclude
-   * @return number of occurrences of fatal messages.
-   * @throws IOException in case of communication errors
-   */
-  public int getNumberOfFatalStatementsInLog(String [] excludeExpList)
-      throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = "FATAL";
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Gets the number of times ERROR log messages were logged in the daemon logs.
-   * <br/>
-   * Pattern used for searching is ERROR. <br/>
-   * @param excludeExpList list of exceptions to exclude
-   * @return number of occurrences of error messages.
-   * @throws IOException is thrown on RPC error. 
-   */
-  public int getNumberOfErrorStatementsInLog(String[] excludeExpList) 
-      throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = "ERROR";    
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Gets the number of times warning log messages were logged in the daemon logs.
-   * <br/>
-   * Pattern used for searching is WARN. <br/>
-   * @param excludeExpList list of exceptions to exclude
-   * @return number of occurrences of warning messages.
-   * @throws IOException thrown on RPC error. 
-   */
-  public int getNumberOfWarnStatementsInLog(String[] excludeExpList) 
-      throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = "WARN";
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Gets the number of times a given exception is present in the log file. <br/>
-   * 
-   * @param e exception class.
-   * @param excludeExpList list of exceptions to exclude. 
-   * @return number of exceptions in log
-   * @throws IOException is thrown on RPC error. 
-   */
-  public int getNumberOfExceptionsInLog(Exception e,
-      String[] excludeExpList) throws IOException {
-    DaemonProtocol proxy = getProxy();
-    String pattern = e.getClass().getSimpleName();    
-    return proxy.getNumberOfMatchesInLogFile(pattern, excludeExpList);
-  }
-
-  /**
-   * Number of times ConcurrentModificationException is present in the log file.
-   * <br/>
-   * @param excludeExpList list of exceptions to exclude.
-   * @return number of times exception in log file.
-   * @throws IOException is thrown on RPC error. 
-   */
-  public int getNumberOfConcurrentModificationExceptionsInLog(
-      String[] excludeExpList) throws IOException {
-    return getNumberOfExceptionsInLog(new ConcurrentModificationException(),
-        excludeExpList);
-  }
-
-  private int errorCount;
-  private int fatalCount;
-  private int concurrentExceptionCount;
-
-  /**
-   * Populates the initial exception counts, used to assert after a testcase
-   * completes that the daemon logged no new exceptions while it ran.
-   * @param excludeExpList list of exceptions to exclude
-   * @throws IOException is thrown on RPC error. 
-   */
-  protected void populateExceptionCount(String [] excludeExpList) 
-      throws IOException {
-    errorCount = getNumberOfErrorStatementsInLog(excludeExpList);
-    LOG.info("Number of error messages in logs : " + errorCount);
-    fatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
-    LOG.info("Number of fatal statement in logs : " + fatalCount);
-    concurrentExceptionCount =
-        getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
-    LOG.info("Number of concurrent modification in logs : "
-        + concurrentExceptionCount);
-  }
-
-  /**
-   * Asserts that no new exceptions were logged into the log file.
-   * <br/>
-   * <b><i>
-   * Prerequisite: populateExceptionCount() has to be called
-   * before calling this method.</i></b>
-   * @param excludeExpList list of exceptions to exclude
-   * @throws IOException is thrown on RPC error. 
-   */
-  protected void assertNoExceptionsOccurred(String [] excludeExpList) 
-      throws IOException {
-    int newerrorCount = getNumberOfErrorStatementsInLog(excludeExpList);
-    LOG.info("Number of error messages while asserting :" + newerrorCount);
-    int newfatalCount = getNumberOfFatalStatementsInLog(excludeExpList);
-    LOG.info("Number of fatal messages while asserting : " + newfatalCount);
-    int newconcurrentExceptionCount =
-        getNumberOfConcurrentModificationExceptionsInLog(excludeExpList);
-    LOG.info("Number of concurrentmodification exception while asserting :"
-        + newconcurrentExceptionCount);
-    Assert.assertEquals(
-        "New Error Messages logged in the log file", errorCount, newerrorCount);
-    Assert.assertEquals(
-        "New Fatal messages logged in the log file", fatalCount, newfatalCount);
-    Assert.assertEquals(
-        "New ConcurrentModificationException in log file",
-        concurrentExceptionCount, newconcurrentExceptionCount);
-  }
-
-  /**
-   * Builds the JMX ObjectName from the given domain, service name, and type.
-   * @param domain JMX domain name
-   * @param serviceName of the service where MBean is registered (NameNode)
-   * @param typeName of the MXBean class
-   * @return ObjectName for requested MXBean, or <code>null</code> if one wasn't
-   *    found
-   * @throws java.io.IOException if the object name is malformed
-   */
-  protected ObjectName getJmxBeanName(String domain, String serviceName,
-                                      String typeName) throws IOException {
-    if (domain == null)
-      domain = HADOOP_JMX_DOMAIN;
-
-    ObjectName jmxBean;
-    try {
-      jmxBean = new ObjectName(domain + ":service=" + serviceName +
-        ",name=" + typeName);
-    } catch (MalformedObjectNameException e) {
-      LOG.debug(e.getStackTrace());
-      throw new IOException(e);
-    }
-    return jmxBean;
-  }
-
-  /**
-   * Create connection with the remote JMX server at given host and port
-   * @param host name of the remote JMX server host
-   * @param port port number of the remote JMX server host
-   * @return instance of MBeanServerConnection or <code>null</code> if one
-   *    hasn't been established
-   * @throws IOException in case of communication errors
-   */
-  protected MBeanServerConnection establishJmxConnection(String host, int port)
-    throws IOException {
-    if (connection != null) return connection;
-    String urlPattern = null;
-    try {
-      urlPattern = "service:jmx:rmi:///jndi/rmi://" +
-        host + ":" + port +
-        "/jmxrmi";
-      JMXServiceURL url = new JMXServiceURL(urlPattern);
-      JMXConnector connector = JMXConnectorFactory.connect(url, null);
-      connection = connector.getMBeanServerConnection();
-    } catch (java.net.MalformedURLException badURLExc) {
-      LOG.debug("bad url: " + urlPattern, badURLExc);
-      throw new IOException(badURLExc);
-    }
-    return connection;
-  }
-
-  Hashtable<String, ObjectName> jmxObjectNames =
-    new Hashtable<String, ObjectName>();
-
-  /**
-   * Method implements all logic for receiving a bean's attribute.
-   * If any initializations, such as establishing bean server connections,
-   * are needed, it will perform them.
-   * @param serviceName name of the service where MBean is registered (NameNode)
-   * @param type name of the MXBean class
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the attribute or <code>null</code> if not found
-   * @throws IOException is thrown in case of any errors
-   */
-  protected Object getJmxAttribute (String serviceName,
-                                    String type,
-                                    String attributeName)
-    throws IOException {
-    Object retAttribute = null;
-    String domain = null;
-    if (isJmxEnabled()) {
-      try {
-        MBeanServerConnection conn =
-          establishJmxConnection(getHostName(),
-              getJmxPortNumber(HADOOP_OPTS_ENV));
-        for (String d : conn.getDomains()) {
-          if (d != null && d.startsWith(HADOOP_JMX_DOMAIN))
-            domain = d;
-        }
-        if (!jmxObjectNames.containsKey(type))
-          jmxObjectNames.put(type, getJmxBeanName(domain, serviceName, type));
-        retAttribute =
-          conn.getAttribute(jmxObjectNames.get(type), attributeName);
-      } catch (MBeanException e) {
-        LOG.debug(e.getStackTrace());
-        throw new IOException(e);
-      } catch (AttributeNotFoundException e) {
-        LOG.warn(e.getStackTrace());
-        throw new IOException(e);
-      } catch (InstanceNotFoundException e) {
-        LOG.warn(e.getStackTrace());
-        throw new IOException(e);
-      } catch (ReflectionException e) {
-        LOG.debug(e.getStackTrace());
-        throw new IOException(e);
-      }
-    }
-    return retAttribute;
-  }
-
-  /**
-   * This method has to be implemented by appropriate concrete daemon client
-   * e.g. DNClient, NNClient, etc.
-   * Concrete implementation has to provide names of the service and bean type
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the given attribute
-   * @throws IOException is thrown in case of communication errors
-   */
-  public abstract Object getDaemonAttribute (String attributeName)
-    throws IOException;
-}
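
The JMX plumbing above reduces to standard JDK remote-JMX calls: detect -Dcom.sun.management.jmxremote and its .port setting in the daemon's HADOOP_OPTS, connect over RMI, and read an attribute. A self-contained sketch of the same path; the host, port, and bean coordinates below are illustrative assumptions:

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

// Hedged sketch of the path getJmxAttribute() takes, using only JDK APIs.
public class JmxReadSketch {
  public static void main(String[] args) throws Exception {
    String host = "nn1.example.com";   // assumed daemon host
    int port = 8004;                   // assumed -Dcom.sun.management.jmxremote.port
    JMXServiceURL url = new JMXServiceURL(
        "service:jmx:rmi:///jndi/rmi://" + host + ":" + port + "/jmxrmi");
    try (JMXConnector connector = JMXConnectorFactory.connect(url, null)) {
      MBeanServerConnection conn = connector.getMBeanServerConnection();
      // Same naming scheme as getJmxBeanName(): <domain>:service=<svc>,name=<type>
      ObjectName name = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
      Object version = conn.getAttribute(name, "Version");
      System.out.println("NameNode version via JMX: " + version);
    }
  }
}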

+ 0 - 537
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/AbstractDaemonCluster.java

@@ -1,537 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.IOException;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.FileInputStream;
-import java.io.DataInputStream;
-import java.util.ArrayList;
-import java.util.LinkedHashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Iterator;
-import java.util.Enumeration;
-import java.util.Arrays;
-import java.util.Hashtable;
-import java.net.URI;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.test.system.process.ClusterProcessManager;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-/**
- * Abstract class which represents a cluster of multiple daemons.
- */
-@SuppressWarnings("unchecked")
-public abstract class AbstractDaemonCluster {
-
-  private static final Log LOG = LogFactory.getLog(AbstractDaemonCluster.class);
-  private String [] excludeExpList ;
-  private Configuration conf;
-  protected ClusterProcessManager clusterManager;
-  private Map<Enum<?>, List<AbstractDaemonClient>> daemons = 
-    new LinkedHashMap<Enum<?>, List<AbstractDaemonClient>>();
-  private String newConfDir = null;  
-  private static final  String CONF_HADOOP_LOCAL_DIR =
-      "test.system.hdrc.hadoop.local.confdir"; 
-  private final static Object waitLock = new Object();
-  
-  /**
-   * Constructor to create a cluster client.<br/>
-   * 
-   * @param conf
-   *          Configuration to be used while constructing the cluster.
-   * @param rcluster
-   *          process manager instance to be used for managing the daemons.
-   * 
-   * @throws IOException
-   */
-  public AbstractDaemonCluster(Configuration conf,
-      ClusterProcessManager rcluster) throws IOException {
-    this.conf = conf;
-    this.clusterManager = rcluster;
-    createAllClients();
-  }
-
-  /**
-   * The method returns the cluster manager. The system test cases require an
-   * instance of HadoopDaemonRemoteCluster to invoke certain operations on the
-   * daemons.
-   * 
-   * @return instance of clusterManager
-   */
-  public ClusterProcessManager getClusterManager() {
-    return clusterManager;
-  }
-
-  protected void createAllClients() throws IOException {
-    for (RemoteProcess p : clusterManager.getAllProcesses()) {
-      List<AbstractDaemonClient> dms = daemons.get(p.getRole());
-      if (dms == null) {
-        dms = new ArrayList<AbstractDaemonClient>();
-        daemons.put(p.getRole(), dms);
-      }
-      dms.add(createClient(p));
-    }
-  }
-  
-  /**
-   * Method to create the daemon client.<br/>
-   * 
-   * @param process
-   *          to manage the daemon.
-   * @return instance of the daemon client
-   * 
-   * @throws IOException
-   */
-  protected abstract AbstractDaemonClient<DaemonProtocol> 
-    createClient(RemoteProcess process) throws IOException;
-
-  /**
-   * Get the global cluster configuration which was used to create the 
-   * cluster. <br/>
-   * 
-   * @return global configuration of the cluster.
-   */
-  public Configuration getConf() {
-    return conf;
-  }
-
-  /**
-   * Return the client handle of all the Daemons.<br/>
-   * 
-   * @return map of role to daemon clients' list.
-   */
-  public Map<Enum<?>, List<AbstractDaemonClient>> getDaemons() {
-    return daemons;
-  }
-
-  /**
-   * Checks if the cluster is ready for testing. <br/>
-   * Algorithm for checking is as follows : <br/>
-   * <ul>
-   * <li> Wait for Daemon to come up </li>
-   * <li> Check if daemon is ready </li>
-   * <li> If one of the daemons is not ready, return false </li>
-   * </ul> 
-   * 
-   * @return true if whole cluster is ready.
-   * 
-   * @throws IOException
-   */
-  public boolean isReady() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        waitForDaemon(daemon);
-        if (!daemon.isReady()) {
-          return false;
-        }
-      }
-    }
-    return true;
-  }
-
-  protected void waitForDaemon(AbstractDaemonClient d) {
-    final int TEN_SEC = 10000;
-    while(true) {
-      try {
-        LOG.info("Waiting for daemon at " + d.getHostName() + " to come up.");
-        LOG.info("Daemon might not be " +
-            "ready or the call to setReady() method hasn't been " +
-            "injected to " + d.getClass() + " ");
-        d.connect();
-        break;
-      } catch (IOException e) {
-        try {
-          Thread.sleep(TEN_SEC);
-        } catch (InterruptedException ie) {
-        }
-      }
-    }
-  }
-
-  /**
-   * Starts the cluster daemons.
-   * @throws IOException
-   */
-  public void start() throws IOException {
-    clusterManager.start();
-  }
-
-  /**
-   * Stops the cluster daemons.
-   * @throws IOException
-   */
-  public void stop() throws IOException {
-    clusterManager.stop();
-  }
-
-  /**
-   * Connect to daemon RPC ports.
-   * @throws IOException
-   */
-  public void connect() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        daemon.connect();
-      }
-    }
-  }
-
-  /**
-   * Disconnect from daemon RPC ports.
-   * @throws IOException
-   */
-  public void disconnect() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        daemon.disconnect();
-      }
-    }
-  }
-
-  /**
-   * Ping all the daemons of the cluster.
-   * @throws IOException
-   */
-  public void ping() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        LOG.info("Daemon is : " + daemon.getHostName() + " pinging....");
-        daemon.ping();
-      }
-    }
-  }
-
-  /**
-   * Connect to the cluster and ensure that it is clean to run tests.
-   * @throws Exception
-   */
-  public void setUp() throws Exception {
-    while (!isReady()) {
-      Thread.sleep(1000);
-    }
-    connect();
-    ping();
-    clearAllControlActions();
-    ensureClean();
-    populateExceptionCounts();
-  }
-  
-  /**
-   * This is mainly used for the test cases to set the list of exceptions
-   * that will be excluded.
-   * @param excludeExpList list of exceptions to exclude
-   */
-  public void setExcludeExpList(String [] excludeExpList) {
-    this.excludeExpList = excludeExpList;
-  }
-  
-  public void clearAllControlActions() throws IOException {
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        LOG.info("Daemon is : " + daemon.getHostName() + " pinging....");
-        daemon.getProxy().clearActions();
-      }
-    }
-  }
-
-  /**
-   * Ensure that the cluster is clean to run tests.
-   * @throws IOException
-   */
-  public void ensureClean() throws IOException {
-  }
-
-  /**
-   * Ensure that cluster is clean. Disconnect from the RPC ports of the daemons.
-   * @throws IOException
-   */
-  public void tearDown() throws IOException {
-    ensureClean();
-    clearAllControlActions();
-    assertNoExceptionMessages();
-    disconnect();
-  }
-
-  /**
-   * Populate the exception counts in all the daemons so that they can be checked when
-   * the testcase has finished running.<br/>
-   * @throws IOException
-   */
-  protected void populateExceptionCounts() throws IOException {
-    for(List<AbstractDaemonClient> lst : daemons.values()) {
-      for(AbstractDaemonClient d : lst) {
-        d.populateExceptionCount(excludeExpList);
-      }
-    }
-  }
-
-  /**
-   * Assert no exception has been thrown during the sequence of the actions.
-   * <br/>
-   * @throws IOException
-   */
-  protected void assertNoExceptionMessages() throws IOException {
-    for(List<AbstractDaemonClient> lst : daemons.values()) {
-      for(AbstractDaemonClient d : lst) {
-        d.assertNoExceptionsOccurred(excludeExpList);
-      }
-    }
-  }
-
-  /**
-   * Get the proxy user definitions from cluster from configuration.
-   * @return ProxyUserDefinitions - proxy users data like groups and hosts.
-   * @throws Exception - if no proxy users found in config.
-   */
-  public ProxyUserDefinitions getHadoopProxyUsers() throws
-     Exception {
-    Iterator itr = conf.iterator();
-    ArrayList<String> proxyUsers = new ArrayList<String>();
-    while (itr.hasNext()) {
-      // Examine each configuration entry exactly once; advancing the iterator
-      // inside the condition would skip entries and mix up unrelated keys.
-      String entry = itr.next().toString();
-      if (entry.indexOf("hadoop.proxyuser") >= 0 &&
-          entry.indexOf("groups=") >= 0) {
-         proxyUsers.add(entry.split("\\.")[2]);
-      }
-    }
-    if (proxyUsers.size() == 0) {
-       LOG.error("No proxy users found in the configuration.");
-       throw new Exception("No proxy users found in the configuration.");
-    }
-
-    ProxyUserDefinitions pud = new ProxyUserDefinitions() {
-      @Override
-      public boolean writeToFile(URI filePath) throws IOException {
-        throw new UnsupportedOperationException("No such method exists.");
-      };
-    };
-
-    for (String userName : proxyUsers) {
-       List<String> groups = Arrays.asList(conf.get("hadoop.proxyuser." +
-           userName + ".groups").split(","));
-       List<String> hosts = Arrays.asList(conf.get("hadoop.proxyuser." +
-           userName + ".hosts").split(","));
-       ProxyUserDefinitions.GroupsAndHost definitions =
-           pud.new GroupsAndHost();
-       definitions.setGroups(groups);
-       definitions.setHosts(hosts);
-       pud.addProxyUser(userName, definitions);
-    }
-    return pud;
-  }
-  
-  /**
-   * Returns the local folder where the configuration file is stored temporarily
-   * while serializing the object.
-   * @return String temporary local folder path for configuration.
-   */
-  private String getHadoopLocalConfDir() {
-    String hadoopLocalConfDir = conf.get(CONF_HADOOP_LOCAL_DIR);
-    if (hadoopLocalConfDir == null || hadoopLocalConfDir.isEmpty()) {
-      LOG.error("No configuration "
-          + "for the CONF_HADOOP_LOCAL_DIR passed");
-      throw new IllegalArgumentException(
-          "No Configuration passed for hadoop conf local directory");
-    }
-    return hadoopLocalConfDir;
-  }
-
-  /**
-   * Restarts the cluster with a new configuration at runtime.<br/>
-   * @param props attributes for new configuration.
-   * @param configFile configuration file.
-   * @throws IOException if an I/O error occurs.
-   */
-  public void restartClusterWithNewConfig(Hashtable<String,?> props, 
-      String configFile) throws IOException {
-
-    String localDirPath = null;
-    File localFolderObj = null;
-    File xmlFileObj = null;
-    String confXMLFile = null;
-    Configuration initConf = new Configuration(getConf());
-    Enumeration<String> e = props.keys();
-    while (e.hasMoreElements()) {
-      String propKey = e.nextElement();
-      Object propValue = props.get(propKey);
-      initConf.set(propKey,propValue.toString());
-    }
-
-    localDirPath = getHadoopLocalConfDir();
-    localFolderObj = new File(localDirPath);
-    if (!localFolderObj.exists()) {
-      localFolderObj.mkdir();
-    }
-    confXMLFile = localDirPath + File.separator + configFile;
-    xmlFileObj = new File(confXMLFile);
-    FileOutputStream out = new FileOutputStream(xmlFileObj);
-    try {
-      initConf.writeXml(out);
-    } finally {
-      out.close();
-    }
-    newConfDir = clusterManager.pushConfig(localDirPath);
-    stop();
-    waitForClusterToStop();
-    clusterManager.start(newConfDir);
-    waitForClusterToStart();
-    localFolderObj.delete();
-  }
-  
-  /**
-   * Restarts the cluster with the default configuration.<br/>
-   * @throws IOException if an I/O error occurs.
-   */
-  public void restart() throws IOException {
-    stop();
-    waitForClusterToStop();
-    start();
-    waitForClusterToStart();
-    cleanupNewConf(newConfDir);
-  }
-
-  /**
-   * Deletes the new configuration folder.
-   * @param path - configuration directory path.
-   * @throws IOException if an I/O error occurs.
-   */
-  public void cleanupNewConf(String path) throws IOException {
-    File file = new File(path);
-    file.delete();
-  }
-  
-  /**
-   * Waits until the cluster has stopped.<br/>
-   * @throws IOException if an I/O error occurs.
-   */
-  public void waitForClusterToStop() throws IOException {
-    List<Thread> chkDaemonStop = new ArrayList<Thread>();
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        DaemonStopThread dmStop = new DaemonStopThread(daemon);
-        chkDaemonStop.add(dmStop);
-        dmStop.start();
-      }
-    }
-
-    for (Thread daemonThread : chkDaemonStop){
-      try {
-        daemonThread.join();
-      } catch(InterruptedException intExp) {
-         LOG.warn("Interrupted while thread is joining." + intExp.getMessage());
-      }
-    }
-  }
- 
-  /**
-   * Waits until the cluster has started.<br/>
-   * @throws IOException if an I/O error occurs.
-   */
-  public void waitForClusterToStart() throws IOException {
-    List<Thread> chkDaemonStart = new ArrayList<Thread>();
-    for (List<AbstractDaemonClient> set : daemons.values()) {
-      for (AbstractDaemonClient daemon : set) {
-        DaemonStartThread dmStart = new DaemonStartThread(daemon);
-        chkDaemonStart.add(dmStart);
-        dmStart.start();
-      }
-    }
-
-    for (Thread daemonThread : chkDaemonStart){
-      try {
-        daemonThread.join();
-      } catch(InterruptedException intExp) {
-        LOG.warn("Interrupted while thread is joining" + intExp.getMessage());
-      }
-    }
-  }
-
-  /**
-   * Waits for the specified amount of time. An InterruptedException raised
-   * while waiting is caught and logged rather than rethrown.
-   * @param duration time in milliseconds.
-   */
-  public void waitFor(long duration) {
-    try {
-      synchronized (waitLock) {
-        waitLock.wait(duration);
-      }
-    } catch (InterruptedException intExp) {
-       LOG.warn("Interrrupeted while thread is waiting" + intExp.getMessage());
-    }
-  }
-  
-  class DaemonStartThread extends Thread {
-    private AbstractDaemonClient daemon;
-
-    public DaemonStartThread(AbstractDaemonClient daemon) {
-      this.daemon = daemon;
-    }
-
-    public void run(){
-      LOG.info("Waiting for Daemon " + daemon.getHostName() 
-          + " to come up.....");
-      while (true) { 
-        try {
-          daemon.ping();
-          LOG.info("Daemon is : " + daemon.getHostName() + " pinging...");
-          break;
-        } catch (Exception exp) {
-          if(LOG.isDebugEnabled()) {
-            LOG.debug(daemon.getHostName() + " is waiting to come up.");
-          }
-          waitFor(60000);
-        }
-      }
-    }
-  }
-  
-  class DaemonStopThread extends Thread {
-    private AbstractDaemonClient daemon;
-
-    public DaemonStopThread(AbstractDaemonClient daemon) {
-      this.daemon = daemon;
-    }
-
-    public void run() {
-      LOG.info("Waiting for Daemon " + daemon.getHostName() 
-          + " to stop.....");
-      while (true) {
-        try {
-          daemon.ping();
-          if(LOG.isDebugEnabled()) {
-            LOG.debug(daemon.getHostName() +" is waiting state to stop.");
-          }
-          waitFor(60000);
-        } catch (Exception exp) {
-          LOG.info("Daemon is : " + daemon.getHostName() + " stopped...");
-          break;
-        } 
-      }
-    }
-  }
-}
-

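The restart API deleted above was driven from tests roughly as in this minimal sketch; the class name, the property, and the config file name are illustrative, not part of the removed sources:

    import java.io.IOException;
    import java.util.Hashtable;
    import org.apache.hadoop.test.system.AbstractDaemonCluster;

    public class RestartWithNewConfigExample {
      // 'cluster' is an instance of any concrete AbstractDaemonCluster subclass.
      public static void run(AbstractDaemonCluster cluster) throws IOException {
        Hashtable<String, String> props = new Hashtable<String, String>();
        props.put("dfs.replication", "2");            // illustrative property
        cluster.restartClusterWithNewConfig(props, "hdfs-site.xml");
        try {
          // ... exercise the cluster under the tuned configuration ...
        } finally {
          cluster.restart();                          // restore the default config
        }
      }
    }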
+ 0 - 86
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ControlAction.java

@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-
-/**
- * Class to represent a control action which can be performed on Daemon.<br/>
- * 
- */
-
-public abstract class ControlAction<T extends Writable> implements Writable {
-
-  private T target;
-
-  /**
-   * Default constructor of the control action; leaves the target unset. <br/>
-   */
-  public ControlAction() {
-  }
-
-  /**
-   * Constructor which sets the target of the control action. <br/>
-   * 
-   * @param target
-   *          of the control action.
-   */
-  public ControlAction(T target) {
-    this.target = target;
-  }
-
-  /**
-   * Gets the target of the control action <br/>
-   * 
-   * @return target of action
-   */
-  public T getTarget() {
-    return target;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    target.readFields(in);
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    target.write(out);
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj instanceof ControlAction) {
-      ControlAction<T> other = (ControlAction<T>) obj;
-      return (this.target.equals(other.getTarget()));
-    } else {
-      return false;
-    }
-  }
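-
-  // equals() above compares targets, so a matching hashCode() is needed;
-  // hash-based collections would otherwise misbehave. Sketch of the pairing:
-  @Override
-  public int hashCode() {
-    return (target == null) ? 0 : target.hashCode();
-  }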
-  
-  
-  @Override
-  public String toString() {
-    return "Action Target : " + this.target;
-  }
-}

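A concrete action typically just fixes the Writable target type. A minimal sketch of the pattern, assuming a hypothetical PingAction that is not part of the removed sources:

    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.test.system.ControlAction;

    public class PingAction extends ControlAction<Text> {
      public PingAction() {
        super(new Text());           // non-null target so readFields() can fill it in
      }
      public PingAction(String hostName) {
        super(new Text(hostName));   // target names the daemon host to act on
      }
    }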
+ 0 - 204
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/DaemonProtocol.java

@@ -1,204 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.VersionedProtocol;
-import org.apache.hadoop.fs.permission.FsPermission;
-
-/**
- * RPC interface of a given Daemon.
- */
-public interface DaemonProtocol extends VersionedProtocol {
-  long versionID = 1L;
-
-  /**
-   * Returns the Daemon configuration.
-   * @return Configuration
-   * @throws IOException in case of errors
-   */
-  Configuration getDaemonConf() throws IOException;
-
-  /**
-   * Check if the Daemon is alive.
-   * 
-   * @throws IOException
-   *           if Daemon is unreachable.
-   */
-  void ping() throws IOException;
-
-  /**
-   * Check if the Daemon is ready to accept RPC connections.
-   * 
-   * @return true if Daemon is ready to accept RPC connection.
-   * @throws IOException in case of errors
-   */
-  boolean isReady() throws IOException;
-
-  /**
-   * Get system level view of the Daemon process.
-   * 
-   * @return returns system level view of the Daemon process.
-   * 
-   * @throws IOException in case of errors
-   */
-  ProcessInfo getProcessInfo() throws IOException;
-  
-  /**
-   * Return a file status object that represents the path.
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return a FileStatus object
-   * @throws FileNotFoundException when the path does not exist;
-   *         IOException see specific implementation
-   */
-  FileStatus getFileStatus(String path, boolean local) throws IOException;
-
-  /**
-   * Create a file with given permissions in a file system.
-   * @param path - source path where the file has to be created.
-   * @param fileName - file name.
-   * @param permission - file permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  void createFile(String path, String fileName, 
-      FsPermission permission, boolean local) throws IOException;
-   
-  /**
-   * Create a folder with given permissions in a file system.
-   * @param path - source path where the folder has to be created.
-   * @param folderName - folder name.
-   * @param permission - folder permissions.
-   * @param local - whether the path is local or not.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void createFolder(String path, String folderName, 
-      FsPermission permission, boolean local) throws IOException;
-  /**
-   * List the statuses of the files/directories in the given path if the path is
-   * a directory.
-   * 
-   * @param path
-   *          given path
-   * @param local
-   *          whether the path is local or not
-   * @return the statuses of the files/directories in the given path
-   * @throws IOException in case of errors
-   */
-  FileStatus[] listStatus(String path, boolean local) throws IOException;
-  
-  /**
-   * Enables a particular control action to be performed on the Daemon <br/>
-   * 
-   * @param action is a control action  to be enabled.
-   * 
-   * @throws IOException in case of errors
-   */
-  @SuppressWarnings("unchecked")
-  void sendAction(ControlAction action) throws IOException;
-  
-  /**
-   * Checks if the particular control action has been delivered to the Daemon 
-   * component <br/>
-   * 
-   * @param action to be checked.
-   * 
-   * @return true if action is still in waiting queue of 
-   *          actions to be delivered.
-   * @throws IOException in case of errors
-   */
-  @SuppressWarnings("unchecked")
-  boolean isActionPending(ControlAction action) throws IOException;
-  
-  /**
-   * Removes a particular control action from the list of the actions which the
-   * daemon maintains. <br/>
-   * <i><b>Not to be directly called by Test Case or clients.</b></i>
-   * @param action to be removed
-   * @throws IOException in case of errors
-   */
-  
-  @SuppressWarnings("unchecked")
-  void removeAction(ControlAction action) throws IOException;
-  
-  /**
-   * Clears out the list of control actions on the particular daemon.
-   * <br/>
-   * @throws IOException in case of errors
-   */
-  void clearActions() throws IOException;
-  
-  /**
-   * Gets a list of pending actions which are targeted on the specified key. 
-   * <br/>
-   * <i><b>Not to be directly used by clients</b></i>
-   * @param key target
-   * @return list of actions.
-   * @throws IOException in case of errors
-   */
-  @SuppressWarnings("unchecked")
-  ControlAction[] getActions(Writable key) throws IOException;
-
-  /**
-   * Gets the number of times a particular pattern has been found in the 
-   * daemons log file.<br/>
-   * <b><i>Please note that the search spans all previous messages of the
-   * daemon, so the better practice is to get the count before an operation
-   * and re-check it afterwards to see whether the sequence of actions has
-   * caused any problems.</i></b>
-   * @param pattern to look for in the daemon's log file
-   * @param list of exceptions to ignore
-   * @return number of times the pattern is found in the log file.
-   * @throws IOException in case of errors
-   */
-  int getNumberOfMatchesInLogFile(String pattern, String[] list) 
-      throws IOException;
-
-  /**
-   * Gets the user who started the particular daemon initially. <br/>
-   * 
-   * @return user who started the particular daemon.
-   * @throws IOException in case of errors
-   */
-  String getDaemonUser() throws IOException;
-
-  /**
-   * Suspends the process.
-   * @param pid process id.
-   * @return true if the process is suspended otherwise false.
-   * @throws IOException if an I/O error occurs.
-   */
-  boolean suspendProcess(String pid) throws IOException;
-
-  /**
-   * Resumes the suspended process.
-   * @param pid process id
-   * @return true if suspended process is resumed otherwise false.
-   * @throws IOException if an I/O error occurs.
-   */
-  boolean resumeProcess(String pid) throws IOException;
-}

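The before/after counting discipline that the getNumberOfMatchesInLogFile javadoc recommends looks roughly like this. A minimal sketch, assuming 'proxy' is a DaemonProtocol stub obtained from a daemon client and "FATAL" is an illustrative pattern:

    import java.io.IOException;
    import org.apache.hadoop.test.system.DaemonProtocol;

    public class LogScanExample {
      static void checkNoNewFatals(DaemonProtocol proxy) throws IOException {
        String[] ignore = new String[0];
        int before = proxy.getNumberOfMatchesInLogFile("FATAL", ignore);
        // ... perform the operation under test here ...
        int after = proxy.getNumberOfMatchesInLogFile("FATAL", ignore);
        if (after > before) {
          throw new AssertionError("operation produced new FATAL log messages");
        }
      }
    }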
+ 0 - 77
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfo.java

@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.util.Map;
-
-import org.apache.hadoop.io.Writable;
-
-/**
- * Daemon system level process information.
- */
-public interface ProcessInfo extends Writable {
-  /**
-   * Get the current time in milliseconds.<br/>
-   * 
-   * @return current time on the daemon clock in milliseconds.
-   */
-  public long currentTimeMillis();
-
-  /**
-   * Get the environment that was used to start the Daemon process.<br/>
-   * 
-   * @return the environment variable list.
-   */
-  public Map<String,String> getEnv();
-
-  /**
-   * Get the System properties of the Daemon process.<br/>
-   * 
-   * @return the properties list.
-   */
-  public Map<String,String> getSystemProperties();
-
-  /**
-   * Get the number of active threads in Daemon VM.<br/>
-   * 
-   * @return number of active threads in Daemon VM.
-   */
-  public int activeThreadCount();
-
-  /**
-   * Get the maximum heap size that is configured for the Daemon VM. <br/>
-   * 
-   * @return maximum heap size.
-   */
-  public long maxMemory();
-
-  /**
-   * Get the free memory in Daemon VM.<br/>
-   * 
-   * @return free memory.
-   */
-  public long freeMemory();
-
-  /**
-   * Get the total used memory in the Daemon VM. <br/>
-   * 
-   * @return total used memory.
-   */
-  public long totalMemory();
-}

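A test can combine these getters, for example to derive the used heap of a daemon. A sketch, again assuming 'proxy' is a DaemonProtocol stub:

    import java.io.IOException;
    import org.apache.hadoop.test.system.DaemonProtocol;
    import org.apache.hadoop.test.system.ProcessInfo;

    public class HeapUsageExample {
      static void printHeapUsage(DaemonProtocol proxy) throws IOException {
        ProcessInfo info = proxy.getProcessInfo();
        // totalMemory() is the current heap, freeMemory() the unused part of it.
        long usedHeap = info.totalMemory() - info.freeMemory();
        System.out.println("threads=" + info.activeThreadCount()
            + " usedHeap=" + usedHeap + " maxHeap=" + info.maxMemory());
      }
    }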
+ 0 - 159
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProcessInfoImpl.java

@@ -1,159 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-
-public class ProcessInfoImpl implements ProcessInfo {
-
-  private int threadCount;
-  private long currentTime;
-  private long freemem;
-  private long maxmem;
-  private long totmem;
-  private Map<String, String> env;
-  private Map<String, String> props;
-
-  public ProcessInfoImpl() {
-    env = new HashMap<String, String>();
-    props = new HashMap<String, String>();
-  }
-
-  /**
-   * Construct a concrete process information object. <br/>
-   * 
-   * @param threadCount count of active threads.
-   * @param currentTime current time on the daemon clock, in milliseconds.
-   * @param freemem free memory in the daemon VM.
-   * @param maxmem maximum heap size of the daemon VM.
-   * @param totmem total memory used in the daemon VM.
-   * @param env environment variable list.
-   * @param props system properties of the daemon process.
-   */
-  public ProcessInfoImpl(int threadCount, long currentTime, long freemem,
-      long maxmem, long totmem, Map<String, String> env, 
-      Map<String, String> props) {
-    this.threadCount = threadCount;
-    this.currentTime = currentTime;
-    this.freemem = freemem;
-    this.maxmem = maxmem;
-    this.totmem = totmem;
-    this.env = env;
-    this.props = props;
-  }
-
-  @Override
-  public int activeThreadCount() {
-    return threadCount;
-  }
-
-  @Override
-  public long currentTimeMillis() {
-    return currentTime;
-  }
-
-  @Override
-  public long freeMemory() {
-    return freemem;
-  }
-
-  @Override
-  public Map<String, String> getEnv() {
-    return env;
-  }
-
-  @Override
-  public Map<String,String> getSystemProperties() {
-    return props;
-  }
-
-  @Override
-  public long maxMemory() {
-    return maxmem;
-  }
-
-  @Override
-  public long totalMemory() {
-    return totmem;
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    this.threadCount = in.readInt();
-    this.currentTime = in.readLong();
-    this.freemem = in.readLong();
-    this.maxmem = in.readLong();
-    this.totmem = in.readLong();
-    read(in, env);
-    read(in, props);
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    out.writeInt(threadCount);
-    out.writeLong(currentTime);
-    out.writeLong(freemem);
-    out.writeLong(maxmem);
-    out.writeLong(totmem);
-    write(out, env);
-    write(out, props);
-  }
-
-  private void read(DataInput in, Map<String, String> map) throws IOException {
-    int size = in.readInt();
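-    // 'size' counts keys and values together (write() stores map.size() * 2),
-    // hence the loop below advances by 2 per key/value pair.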
-    for (int i = 0; i < size; i = i + 2) {
-      String key = in.readUTF();
-      String value = in.readUTF();
-      map.put(key, value);
-    }
-  }
-
-  private void write(DataOutput out, Map<String, String> map) 
-  throws IOException {
-    int size = (map.size() * 2);
-    out.writeInt(size);
-    for (Map.Entry<String, String> entry : map.entrySet()) {
-      out.writeUTF(entry.getKey());
-      out.writeUTF(entry.getValue());
-    }
-  }
-
-  @Override
-  public String toString() {
-    StringBuffer strBuf = new StringBuffer();
-    strBuf.append(String.format("active threads : %d\n", threadCount));
-    strBuf.append(String.format("current time  : %d\n", currentTime));
-    strBuf.append(String.format("free memory  : %d\n", freemem));
-    strBuf.append(String.format("total memory  : %d\n", totmem));
-    strBuf.append(String.format("max memory  : %d\n", maxmem));
-    strBuf.append("Environment Variables : \n");
-    for (Map.Entry<String, String> entry : env.entrySet()) {
-      strBuf.append(String.format("key : %s value : %s \n", entry.getKey(),
-          entry.getValue()));
-    }
-    return strBuf.toString();
-  }
-
-}

+ 0 - 90
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/ProxyUserDefinitions.java

@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.test.system;
-
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.io.IOException;
-import java.net.URI;
-
-/**
- *  The data container which holds the host names and
- *  groups for each proxy user.
- */
-public abstract class ProxyUserDefinitions {
-
-  /**
-   *  Groups and host names container
-   */
-  public class GroupsAndHost {
-    private List<String> groups;
-    private List<String> hosts;
-    public List<String> getGroups() {
-      return groups;
-    }
-    public void setGroups(List<String> groups) {
-      this.groups = groups;
-    }
-    public List<String> getHosts() {
-      return hosts;
-    }
-    public void setHosts(List<String> hosts) {
-      this.hosts = hosts;
-    }
-  }
-
-  protected Map<String, GroupsAndHost> proxyUsers;
-  protected ProxyUserDefinitions () {
-    proxyUsers = new HashMap<String, GroupsAndHost>();
-  }
-
-  /**
-   * Add proxy user data to a container.
-   * @param userName - proxy user name.
-   * @param definitions - groups and host names.
-   */
-  public void addProxyUser (String userName, GroupsAndHost definitions) {
-    proxyUsers.put(userName, definitions);
-  }
-
-  /**
-   * Get the host names and groups for the given proxy user.
-   * @param userName - proxy user name.
-   * @return - GroupsAndHost object.
-   */
-  public GroupsAndHost getProxyUser (String userName) {
-    return proxyUsers.get(userName);
-  }
-
-  /**
-   * Get the proxy users data which contains the host names
-   * and groups for each user.
-   * @return - the proxy users data as hash map.
-   */
-  public Map<String, GroupsAndHost> getProxyUsers () {
-    return proxyUsers;
-  }
-
-  /**
-   * The implementation of this method has to be provided by a child class.
-   * @param filePath destination to which the definitions should be written.
-   * @return true if the write succeeded.
-   * @throws IOException if an I/O error occurs.
-   */
-  public abstract boolean writeToFile(URI filePath) throws IOException;
-}

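Iterating the container is straightforward; a sketch, assuming 'pud' came from AbstractDaemonCluster.getHadoopProxyUsers():

    import java.util.Map;
    import org.apache.hadoop.test.system.ProxyUserDefinitions;

    public class ProxyUserDump {
      static void dump(ProxyUserDefinitions pud) {
        for (Map.Entry<String, ProxyUserDefinitions.GroupsAndHost> e
            : pud.getProxyUsers().entrySet()) {
          System.out.println(e.getKey() + " groups=" + e.getValue().getGroups()
              + " hosts=" + e.getValue().getHosts());
        }
      }
    }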
+ 0 - 99
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/ClusterProcessManager.java

@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system.process;
-
-import java.io.IOException;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * Interface to manage the remote processes in the cluster.
- */
-public interface ClusterProcessManager {
-
-  /**
-   * Initialization method to pass the configuration object which is required 
-   * by the ClusterProcessManager to manage the cluster.<br/>
-   * Configuration object should typically contain all the parameters which are 
-   * required by the implementations.<br/>
-   *  
-   * @param conf configuration containing values of the specific keys which 
-   * are required by the implementation of the cluster process manager.
-   * 
-   * @throws IOException when initialization fails.
-   */
-  void init(Configuration conf) throws IOException;
-
-  /**
-   * Get the list of RemoteProcess handles of all the remote processes.
-   * @return the list of RemoteProcess handles.
-   */
-  List<RemoteProcess> getAllProcesses();
-
-  /**
-   * Get all the roles this cluster's daemon processes have.
-   * @return the set of daemon roles.
-   */
-  Set<Enum<?>> getRoles();
-
-  /**
-   * Method to start all the remote daemons.<br/>
-   * 
-   * @throws IOException if startup procedure fails.
-   */
-  void start() throws IOException;
-
-  /**
-   * Starts the daemon from the user specified conf dir.
-   * @param newConfLocation the dir where the new conf files reside.
-   * @throws IOException if start from new conf fails. 
-   */
-  void start(String newConfLocation) throws IOException;
-
-  /**
-   * Stops the daemon running from user specified conf dir.
-   * 
-   * @param newConfLocation the dir where the new conf files reside.
-   * @throws IOException if stop from new conf fails. 
-   */
-  void stop(String newConfLocation) throws IOException;
-
-  /**
-   * Method to shutdown all the remote daemons.<br/>
-   * 
-   * @throws IOException if shutdown procedure fails.
-   */
-  void stop() throws IOException;
-  
-  /**
-   * Returns whether multi-user support is enabled for this cluster. 
-   * <br/>
-   * @return true if multi-user support is enabled.
-   * @throws IOException if RPC returns error. 
-   */
-  boolean isMultiUserSupported() throws IOException;
-
-  /**
-   * Pushes a new config to the daemons.
-   * @param localDir local directory containing the new config files.
-   * @return the remote directory location to which the config was pushed.
-   * @throws IOException if pushConfig fails.
-   */
-  String pushConfig(String localDir) throws IOException;
-}

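The pushConfig/stop/start contract composes into the restart sequence used by the cluster classes above. A minimal sketch, assuming 'manager' is an initialized implementation and the local path is illustrative:

    import java.io.IOException;
    import org.apache.hadoop.test.system.process.ClusterProcessManager;

    public class PushConfigExample {
      static void restartWithPushedConfig(ClusterProcessManager manager)
          throws IOException {
        String remoteConfDir = manager.pushConfig("/tmp/new-conf"); // illustrative path
        manager.stop();
        manager.start(remoteConfDir);  // bring daemons up with the pushed config
      }
    }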
+ 0 - 404
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/HadoopDaemonRemoteCluster.java

@@ -1,404 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system.process;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileReader;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-
-/**
- * The concrete class which implements the start-up and shut-down routines
- * based on hadoop-daemon.sh. <br/>
- * 
- * Class requires two keys to be present in the Configuration objects passed to
- * it. Look at <code>CONF_HADOOPHOME</code> and
- * <code>CONF_HADOOPCONFDIR</code> for the names of the
- * configuration keys.
- * 
- * The final command executed will have the following format: 
- * <br/>
- * <code>
- *  ssh host 'hadoop-home/bin/hadoop-daemon.sh --script scriptName 
- *  --config HADOOP_CONF_DIR (start|stop) command'
- * </code>
- */
-public abstract class HadoopDaemonRemoteCluster 
-    implements ClusterProcessManager {
-
-  private static final Log LOG = LogFactory
-      .getLog(HadoopDaemonRemoteCluster.class.getName());
-
-  public static final String CONF_HADOOPNEWCONFDIR =
-    "test.system.hdrc.hadoopnewconfdir";
-  /**
-   * Key used to configure the HADOOP_PREFIX to be used by the
-   * HadoopDaemonRemoteCluster.
-   */
-  public final static String CONF_HADOOPHOME =
-    "test.system.hdrc.hadoophome";
-
-  public final static String CONF_SCRIPTDIR =
-    "test.system.hdrc.deployed.scripts.dir";
-  /**
-   * Key used to configure the HADOOP_CONF_DIR to be used by the
-   * HadoopDaemonRemoteCluster.
-   */
-  public final static String CONF_HADOOPCONFDIR = 
-    "test.system.hdrc.hadoopconfdir";
-
-  public final static String CONF_DEPLOYED_HADOOPCONFDIR =
-    "test.system.hdrc.deployed.hadoopconfdir";
-
-  private String hadoopHome;
-  protected String hadoopConfDir;
-  protected String scriptsDir;
-  protected String hadoopNewConfDir;
-  private final Set<Enum<?>> roles;
-  private final List<HadoopDaemonInfo> daemonInfos;
-  private List<RemoteProcess> processes;
-  protected Configuration conf;
-  
-  public static class HadoopDaemonInfo {
-    public final String cmd;
-    public final Enum<?> role;
-    public final List<String> hostNames;
-    public HadoopDaemonInfo(String cmd, Enum<?> role, List<String> hostNames) {
-      super();
-      this.cmd = cmd;
-      this.role = role;
-      this.hostNames = hostNames;
-    }
-
-    public HadoopDaemonInfo(String cmd, Enum<?> role, String hostFile) 
-        throws IOException {
-      super();
-      this.cmd = cmd;
-      this.role = role;
-      File file = new File(getDeployedHadoopConfDir(), hostFile);
-      BufferedReader reader = null;
-      hostNames = new ArrayList<String>();
-      try {
-        reader = new BufferedReader(new FileReader(file));
-        String host = null;
-        while ((host = reader.readLine()) != null) {
-          if (host.trim().isEmpty() || host.startsWith("#")) {
-            // Skip empty lines and comment lines.
-            continue;
-          }
-          hostNames.add(host.trim());
-        }
-        if (hostNames.size() < 1) {
-          throw new IllegalArgumentException("At least one hostname "
-              +
-            "is required to be present in file - " + hostFile);
-        }
-      } finally {
-        try {
-          if (reader != null) {
-            reader.close();
-          }
-        } catch (IOException e) {
-          LOG.warn("Could not close reader", e);
-        }
-      }
-      LOG.info("Created HadoopDaemonInfo for " + cmd + " " + role + " from " 
-          + hostFile);
-    }
-  }
-
-  @Override
-  public String pushConfig(String localDir) throws IOException {
-    for (RemoteProcess process : processes){
-      process.pushConfig(localDir);
-    }
-    return hadoopNewConfDir;
-  }
-
-  public HadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
-    this.daemonInfos = daemonInfos;
-    this.roles = new HashSet<Enum<?>>();
-    for (HadoopDaemonInfo info : daemonInfos) {
-      this.roles.add(info.role);
-    }
-  }
-
-  @Override
-  public void init(Configuration conf) throws IOException {
-    this.conf = conf;
-    populateDirectories(conf);
-    this.processes = new ArrayList<RemoteProcess>();
-    populateDaemons();
-  }
-
-  @Override
-  public List<RemoteProcess> getAllProcesses() {
-    return processes;
-  }
-
-  @Override
-  public Set<Enum<?>> getRoles() {
-    return roles;
-  }
-
-  /**
-   * Method to populate the hadoop home and hadoop configuration directories.
-   * 
-   * @param conf
-   *          Configuration object containing values for
-   *          CONF_HADOOPHOME and
-   *          CONF_HADOOPCONFDIR
-   * 
-   * @throws IllegalArgumentException
-   *           if the configuration or system property set does not contain
-   *           values for the required keys.
-   */
-  protected void populateDirectories(Configuration conf) {
-    hadoopHome = conf.get(CONF_HADOOPHOME);
-    hadoopConfDir = conf.get(CONF_HADOOPCONFDIR);
-    scriptsDir = conf.get(CONF_SCRIPTDIR);
-    hadoopNewConfDir = conf.get(CONF_HADOOPNEWCONFDIR);
-    if (hadoopHome == null || hadoopConfDir == null || hadoopHome.isEmpty()
-        || hadoopConfDir.isEmpty()) {
-      LOG.error("No configuration "
-          + "for the HADOOP_PREFIX and HADOOP_CONF_DIR passed");
-      throw new IllegalArgumentException(
-          "No Configuration passed for hadoop home " +
-          "and hadoop conf directories");
-    }
-  }
-
-  public static String getDeployedHadoopConfDir() {
-    String dir = System.getProperty(CONF_DEPLOYED_HADOOPCONFDIR);
-    if (dir == null || dir.isEmpty()) {
-      LOG.error("No configuration "
-          + "for the CONF_DEPLOYED_HADOOPCONFDIR passed");
-      throw new IllegalArgumentException(
-          "No Configuration passed for hadoop deployed conf directory");
-    }
-    return dir;
-  }
-
-  @Override
-  public void start() throws IOException {
-    for (RemoteProcess process : processes) {
-      process.start();
-    }
-  }
-
-  @Override
-  public void start(String newConfLocation)throws IOException {
-    for (RemoteProcess process : processes) {
-      process.start(newConfLocation);
-    }
-  }
-
-  @Override
-  public void stop() throws IOException {
-    for (RemoteProcess process : processes) {
-      process.kill();
-    }
-  }
-
-  @Override
-  public void stop(String newConfLocation) throws IOException {
-    for (RemoteProcess process : processes) {
-      process.kill(newConfLocation);
-    }
-  }
-
-  protected void populateDaemon(HadoopDaemonInfo info) throws IOException {
-    for (String host : info.hostNames) {
-      InetAddress addr = InetAddress.getByName(host);
-      RemoteProcess process = getProcessManager(info, 
-          addr.getCanonicalHostName());
-      processes.add(process);
-    }
-  }
-
-  protected void populateDaemons() throws IOException {
-   for (HadoopDaemonInfo info : daemonInfos) {
-     populateDaemon(info);
-   }
-  }
-
-  @Override
-  public boolean isMultiUserSupported() throws IOException {
-    return false;
-  }
-
-  protected RemoteProcess getProcessManager(
-      HadoopDaemonInfo info, String hostName) {
-    RemoteProcess process = new ScriptDaemon(info.cmd, hostName, info.role);
-    return process;
-  }
-
-  /**
-   * The core daemon class which implements the remote process
-   * management of the daemon processes in the cluster.
-   * 
-   */
-  class ScriptDaemon implements RemoteProcess {
-
-    private static final String STOP_COMMAND = "stop";
-    private static final String START_COMMAND = "start";
-    private static final String SCRIPT_NAME = "hadoop-daemon.sh";
-    private static final String PUSH_CONFIG ="pushConfig.sh";
-    protected final String daemonName;
-    protected final String hostName;
-    private final Enum<?> role;
-
-    public ScriptDaemon(String daemonName, String hostName, Enum<?> role) {
-      this.daemonName = daemonName;
-      this.hostName = hostName;
-      this.role = role;
-    }
-
-    @Override
-    public String getHostName() {
-      return hostName;
-    }
-
-    private String[] getPushConfigCommand(String localDir, String remoteDir,
-        File scriptDir) throws IOException{
-      ArrayList<String> cmdArgs = new ArrayList<String>();
-      cmdArgs.add(scriptDir.getAbsolutePath() + File.separator + PUSH_CONFIG);
-      cmdArgs.add(localDir);
-      cmdArgs.add(hostName);
-      cmdArgs.add(remoteDir);
-      cmdArgs.add(hadoopConfDir);
-      return (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
-    }
-
-    private ShellCommandExecutor buildPushConfig(String local, String remote )
-        throws IOException {
-      File scriptDir = new File(scriptsDir);
-      String[] commandArgs = getPushConfigCommand(local, remote, scriptDir);
-      HashMap<String, String> env = new HashMap<String, String>();
-      ShellCommandExecutor executor = new ShellCommandExecutor(commandArgs,
-          scriptDir, env);
-      LOG.info(executor.toString());
-      return executor;
-    }
-
-    private ShellCommandExecutor createNewConfDir() throws IOException {
-      ArrayList<String> cmdArgs = new ArrayList<String>();
-      cmdArgs.add("ssh");
-      cmdArgs.add(hostName);
-      cmdArgs.add("if [ -d "+ hadoopNewConfDir+
-          " ];\n then echo Will remove existing directory;  rm -rf "+
-          hadoopNewConfDir+";\nmkdir "+ hadoopNewConfDir+"; else \n"+
-          "echo " + hadoopNewConfDir + " doesnt exist hence creating" +
-          ";  mkdir " + hadoopNewConfDir + ";\n  fi");
-      String[] cmd = (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
-      ShellCommandExecutor executor = new ShellCommandExecutor(cmd);
-      LOG.info(executor.toString());
-      return executor;
-    }
-
-    @Override
-    public void pushConfig(String localDir) throws IOException {
-      createNewConfDir().execute();
-      buildPushConfig(localDir, hadoopNewConfDir).execute();
-    }
-
-    private ShellCommandExecutor buildCommandExecutor(String command,
-        String confDir) {
-      String[] commandArgs = getCommand(command, confDir);
-      File cwd = new File(".");
-      HashMap<String, String> env = new HashMap<String, String>();
-      env.put("HADOOP_CONF_DIR", confDir);
-      ShellCommandExecutor executor
-        = new ShellCommandExecutor(commandArgs, cwd, env);
-      LOG.info(executor.toString());
-      return executor;
-    }
-
-    private File getBinDir() {
-      File binDir = new File(hadoopHome, "bin");
-      return binDir;
-    }
-
-    protected String[] getCommand(String command, String confDir) {
-      ArrayList<String> cmdArgs = new ArrayList<String>();
-      File binDir = getBinDir();
-      cmdArgs.add("ssh");
-      cmdArgs.add(hostName);
-      cmdArgs.add(binDir.getAbsolutePath() + File.separator + SCRIPT_NAME);
-      cmdArgs.add("--config");
-      cmdArgs.add(confDir);
-      // XXX Twenty internal version does not support --script option.
-      cmdArgs.add(command);
-      cmdArgs.add(daemonName);
-      return (String[]) cmdArgs.toArray(new String[cmdArgs.size()]);
-    }
-
-    @Override
-    public void kill() throws IOException {
-      kill(hadoopConfDir);
-    }
-
-    @Override
-    public void start() throws IOException {
-      start(hadoopConfDir);
-    }
-
-    public void start(String newConfLocation) throws IOException {
-      ShellCommandExecutor cme = buildCommandExecutor(START_COMMAND,
-          newConfLocation);
-      cme.execute();
-      String output = cme.getOutput();
-      if (!output.isEmpty()) { //getOutput() never returns null value
-        if (output.toLowerCase().contains("error")) {
-          LOG.warn("Error is detected.");
-          throw new IOException("Start error\n" + output);
-        }
-      }
-    }
-
-    public void kill(String newConfLocation) throws IOException {
-      ShellCommandExecutor cme
-        = buildCommandExecutor(STOP_COMMAND, newConfLocation);
-      cme.execute();
-      String output = cme.getOutput();
-      if (!output.isEmpty()) { //getOutput() never returns null value
-        if (output.toLowerCase().contains("error")) {
-          LOG.info("Error is detected.");
-          throw new IOException("Kill error\n" + output);
-        }
-      }
-    }
-
-    @Override
-    public Enum<?> getRole() {
-      return role;
-    }
-  }
-}

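The required configuration keys can be supplied as in this sketch; only the key names come from the class above, the paths are assumptions for illustration:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;

    public class ClusterConfigExample {
      // 'cluster' is any concrete HadoopDaemonRemoteCluster subclass instance.
      static void initCluster(HadoopDaemonRemoteCluster cluster) throws IOException {
        Configuration conf = new Configuration();
        conf.set("test.system.hdrc.hadoophome", "/opt/hadoop");          // CONF_HADOOPHOME
        conf.set("test.system.hdrc.hadoopconfdir", "/opt/hadoop/conf");  // CONF_HADOOPCONFDIR
        conf.set("test.system.hdrc.hadoopnewconfdir", "/opt/hadoop/conf-new");
        conf.set("test.system.hdrc.deployed.scripts.dir", "/opt/hadoop/test-scripts");
        cluster.init(conf);
      }
    }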
+ 0 - 96
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/MultiUserHadoopDaemonRemoteCluster.java

@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to you under the Apache License, Version
- * 2.0 (the "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * 
- * http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-package org.apache.hadoop.test.system.process;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
-
-public abstract class MultiUserHadoopDaemonRemoteCluster
-    extends HadoopDaemonRemoteCluster {
-
-  public MultiUserHadoopDaemonRemoteCluster(List<HadoopDaemonInfo> daemonInfos) {
-    super(daemonInfos);
-  }
-
-  @Override
-  protected RemoteProcess getProcessManager(
-      HadoopDaemonInfo info, String hostName) {
-    return new MultiUserScriptDaemon(info.cmd, hostName, info.role);
-  }
-
-  @Override
-  public boolean isMultiUserSupported() throws IOException {
-    return true;
-  }
-
-  class MultiUserScriptDaemon extends ScriptDaemon {
-
-    private static final String MULTI_USER_BINARY_PATH_KEY =
-        "test.system.hdrc.multi-user.binary.path";
-    private static final String MULTI_USER_MANAGING_USER =
-        "test.system.hdrc.multi-user.managinguser.";
-    private String binaryPath;
-    /**
-     * The managing user for a particular daemon is obtained from
-     * MULTI_USER_MANAGING_USER + daemonName.
-     */
-    private String managingUser;
-
-    public MultiUserScriptDaemon(
-        String daemonName, String hostName, Enum<?> role) {
-      super(daemonName, hostName, role);
-      initialize(daemonName);
-    }
-
-    private void initialize(String daemonName) {
-      binaryPath = conf.get(MULTI_USER_BINARY_PATH_KEY);
-      if (binaryPath == null || binaryPath.trim().isEmpty()) {
-        throw new IllegalArgumentException(
-            "Binary path for multi-user path is not present. Please set "
-                + MULTI_USER_BINARY_PATH_KEY + " correctly");
-      }
-      File binaryFile = new File(binaryPath);
-      if (!binaryFile.exists() || !binaryFile.canExecute()) {
-        throw new IllegalArgumentException(
-            "Binary file path is not configured correctly. Please set "
-                + MULTI_USER_BINARY_PATH_KEY
-                + " to properly configured binary file.");
-      }
-      managingUser = conf.get(MULTI_USER_MANAGING_USER + daemonName);
-      if (managingUser == null || managingUser.trim().isEmpty()) {
-        throw new IllegalArgumentException(
-            "Managing user for daemon not present; please set "
-                + MULTI_USER_MANAGING_USER + daemonName + " to the correct value.");
-      }
-    }
-
-    @Override
-    protected String[] getCommand(String command,String confDir) {
-      ArrayList<String> commandList = new ArrayList<String>();
-      commandList.add(binaryPath);
-      commandList.add(managingUser);
-      commandList.add(hostName);
-      commandList.add("--config "
-          + confDir + " " + command + " " + daemonName);
-      return (String[]) commandList.toArray(new String[commandList.size()]);
-    }
-  }
-}

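The multi-user variant additionally expects the runAs binary path and a managing user per daemon. Continuing the Configuration sketch above, with illustrative values and 'namenode' standing in for the daemon command name:

    conf.set("test.system.hdrc.multi-user.binary.path", "/usr/local/bin/runAs");
    conf.set("test.system.hdrc.multi-user.managinguser.namenode", "hdfs");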
+ 0 - 74
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/test/system/process/RemoteProcess.java

@@ -1,74 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test.system.process;
-
-import java.io.IOException;
-
-/**
- * Interface to manage the remote process.
- */
-public interface RemoteProcess {
-  /**
-   * Get the host on which the daemon process is running/stopped.<br/>
-   * 
-   * @return hostname on which process is running/stopped.
-   */
-  String getHostName();
-
-  /**
-   * Start a given daemon process.<br/>
-   * 
-   * @throws IOException if startup fails.
-   */
-  void start() throws IOException;
-  /**
-   * Starts a daemon from user specified conf dir. 
-   * @param newConfLocation is dir where new conf resides. 
-   * @throws IOException if start of process fails from new location.
-   */
-  void start(String newConfLocation) throws IOException;
-  /**
-   * Stop a given daemon process.<br/>
-   * 
-   * @throws IOException if shutdown fails.
-   */
-  void kill() throws IOException;
-  
-  /**
-   * Stops a given daemon running from user specified 
-   * conf dir. <br/>
-   * @param newConfLocation dir location where new conf resides. 
-   * @throws IOException if kill fails from new conf location.
-   */
-   void kill(String newConfLocation) throws IOException;
-  /**
-   * Get the role of the Daemon in the cluster.
-   * 
-   * @return Enum
-   */
-  Enum<?> getRole();
-  
-  /**
-   * Pushes the configuration to a new configuration directory. 
-   * @param localDir The local directory which has config files that will be 
-   * pushed to the remote location
-   * @throws IOException is thrown if the pushConfig results in an error. 
-   */
-  void pushConfig(String localDir) throws IOException;
-}

+ 0 - 27
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/RemoteExecution.java

@@ -1,27 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.util;
-
-public interface RemoteExecution {
-  public void executeCommand (String remoteHostName, String user,
-          String  command) throws Exception;
-  public int getExitCode();
-  public String getOutput();
-  public String getCommandString();
-}

+ 0 - 203
hadoop-common-project/hadoop-common/src/test/system/java/org/apache/hadoop/util/SSHRemoteExecution.java

@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.util;
-
-import com.jcraft.jsch.*;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.util.Properties;
-
-/**
- * Remote execution of commands on a remote machine.
- */
-
-public class SSHRemoteExecution implements RemoteExecution {
-
-  static final Log LOG = LogFactory.getLog(SSHRemoteExecution.class);
-  static final int SSH_PORT = 22;
-  static final String DEFAULT_IDENTITY="id_dsa";
-  static final String DEFAULT_KNOWNHOSTS="known_hosts";
-  static final String FS = System.getProperty("file.separator");
-  static final String LS = System.getProperty("line.separator");
-  private int exitCode;
-  private StringBuffer output;
-  private String commandString;
-
-  final StringBuffer errorMessage = new StringBuffer();
-  public SSHRemoteExecution() throws Exception {
-  }
-
-  protected String getHomeDir() {
-    String currentUser=System.getProperty("user.name");
-    String userHome=System.getProperty("user.home");
-
-    return userHome.substring(0, userHome.indexOf(currentUser)-1);
-  }
-
-  /**
-   * Execute command at remote host under a given user.
-   * @param remoteHostName remote host name
-   * @param user is the name of the user to log in as;
-   *   current user will be used if this is set to <code>null</code>
-   * @param command to be executed remotely
-   * @param identityFile is the name of alternative identity file; default
-   *   is ~user/.ssh/id_dsa
-   * @param portNumber remote SSH daemon port number, default is 22
-   * @throws Exception in case of errors
-   */
-  public void executeCommand (String remoteHostName, String user,
-          String  command, String identityFile, int portNumber) throws Exception {
-    commandString = command;
-    String sessionUser = System.getProperty("user.name");
-    String userHome=System.getProperty("user.home");
-    if (user != null) {
-      sessionUser = user;
-      userHome = getHomeDir() + FS + user;
-    }
-    String dotSSHDir = userHome + FS + ".ssh";
-    String sessionIdentity = dotSSHDir + FS + DEFAULT_IDENTITY;
-    if (identityFile != null) {
-      sessionIdentity = identityFile;
-    }
-
-    JSch jsch = new JSch();
-
-    Session session = jsch.getSession(sessionUser, remoteHostName, portNumber);
-    jsch.setKnownHosts(dotSSHDir + FS + DEFAULT_KNOWNHOSTS);
-    jsch.addIdentity(sessionIdentity);
-
-    Properties config = new Properties();
-    config.put("StrictHostKeyChecking", "no");
-    session.setConfig(config);
-
-    session.connect(30000);   // making a connection with timeout.
-
-    Channel channel=session.openChannel("exec");
-    ((ChannelExec)channel).setCommand(command);
-    channel.setInputStream(null);
-
-    final BufferedReader errReader =
-            new BufferedReader(
-              new InputStreamReader(((ChannelExec)channel).getErrStream()));
-    BufferedReader inReader =
-            new BufferedReader(new InputStreamReader(channel.getInputStream()));
-
-    channel.connect();
-    Thread errorThread = new Thread() {
-      @Override
-      public void run() {
-        try {
-          String line = errReader.readLine();
-          while((line != null) && !isInterrupted()) {
-            errorMessage.append(line);
-            errorMessage.append(LS);
-            line = errReader.readLine();
-          }
-        } catch(IOException ioe) {
-          LOG.warn("Error reading the error stream", ioe);
-        }
-      }
-    };
-
-    try {
-      errorThread.start();
-    } catch (IllegalStateException e) {
-      LOG.debug(e);
-    }
-    try {
-      // parseExecResult() drains the remote stdout into 'output' until EOF.
-      parseExecResult(inReader);
-
-      if(channel.isClosed()) {
-        exitCode = channel.getExitStatus();
-        LOG.debug("exit-status: " + exitCode);
-      }
-      try {
-        // make sure that the error thread exits
-        errorThread.join();
-      } catch (InterruptedException ie) {
-        LOG.warn("Interrupted while reading the error stream", ie);
-      }
-    } catch (Exception ie) {
-      throw new IOException(ie);
-    }
-    finally {
-      try {
-        inReader.close();
-      } catch (IOException ioe) {
-        LOG.warn("Error while closing the input stream", ioe);
-      }
-      try {
-        errReader.close();
-      } catch (IOException ioe) {
-        LOG.warn("Error while closing the error stream", ioe);
-      }
-      channel.disconnect();
-      session.disconnect();
-    }
-  }
-
-  /**
-   * Execute command at remote host under the given user.
-   * The default identity key ~/.ssh/id_dsa and the default
-   * known_hosts file ~/.ssh/known_hosts will be used.
-   * @param remoteHostName remote host name
-   * @param user is the name of the user to log in as;
-   *   if equal to <code>null</code> then the current user name will be used
-   * @param command to be executed remotely
-   */
-  @Override
-  public void executeCommand (String remoteHostName, String user,
-          String  command) throws Exception {
-    executeCommand(remoteHostName, user, command, null, SSH_PORT);
-  }
-
-  @Override
-  public int getExitCode() {
-    return exitCode;
-  }
-
-  protected void parseExecResult(BufferedReader lines) throws IOException {
-    output = new StringBuffer();
-    char[] buf = new char[512];
-    int nRead;
-    while ( (nRead = lines.read(buf, 0, buf.length)) > 0 ) {
-      output.append(buf, 0, nRead);
-    }
-  }
-
-  /** Get the output of the ssh command.*/
-  @Override
-  public String getOutput() {
-    return (output == null) ? "" : output.toString();
-  }
-
-  /** Get the String representation of the ssh command. */
-  @Override
-  public String getCommandString() {
-    return commandString;
-  }
-}

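Typical use of the class is a single call followed by the getters. A sketch; the host name is illustrative, and the caller's ~/.ssh/id_dsa must be authorized on the target:

    import org.apache.hadoop.util.SSHRemoteExecution;

    public class SSHExample {
      public static void main(String[] args) throws Exception {
        SSHRemoteExecution ssh = new SSHRemoteExecution();
        ssh.executeCommand("build01.example.com", null, "uname -a");
        if (ssh.getExitCode() != 0) {
          System.err.println("failed: " + ssh.getCommandString());
        }
        System.out.println(ssh.getOutput());
      }
    }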
+ 0 - 48
hadoop-common-project/hadoop-common/src/test/system/scripts/pushConfig.sh

@@ -1,48 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# local folder with new configuration file
-LOCAL_DIR=$1
-# remote daemon host
-HOST=$2
-#remote dir points to the location of new config files
-REMOTE_DIR=$3
-# remote daemon HADOOP_CONF_DIR location
-DAEMON_HADOOP_CONF_DIR=$4
-
-if [ $# -ne 4 ]; then
-  echo "Wrong number of parameters" >&2
-  exit 2
-fi
-
-ret_value=0
-
-echo The script makes a remote copy of existing ${DAEMON_HADOOP_CONF_DIR} to ${REMOTE_DIR}
-echo and populates it with new configs prepared in $LOCAL_DIR
-
-ssh ${HOST} cp -r ${DAEMON_HADOOP_CONF_DIR}/* ${REMOTE_DIR}
-ret_value=$?
-
-# make sure files are writable
-ssh ${HOST} chmod u+w ${REMOTE_DIR}/*
-
-# copy new files over
-scp -r ${LOCAL_DIR}/* ${HOST}:${REMOTE_DIR}
-
-err_code=`echo $? + $ret_value | bc`
-echo Copying of files from local to remote returned ${err_code}
-

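A hedged sketch of driving the removed pushConfig.sh from Java test code; the script location, host name, and directory paths below are illustrative assumptions, while the four positional arguments follow the order documented in the script above:

import java.io.IOException;

public class PushConfigDriver {
  public static void main(String[] args) throws IOException, InterruptedException {
    // Arguments mirror the script's positional parameters:
    // LOCAL_DIR, HOST, REMOTE_DIR, DAEMON_HADOOP_CONF_DIR.
    ProcessBuilder pb = new ProcessBuilder(
        "src/test/system/scripts/pushConfig.sh", // hypothetical script location
        "/tmp/localconf",                        // local folder with new config files
        "dn1.example.com",                       // hypothetical remote daemon host
        "/tmp/newconf",                          // remote dir for the new config files
        "/etc/hadoop/conf");                     // remote daemon HADOOP_CONF_DIR
    pb.inheritIO();
    int rc = pb.start().waitFor();
    System.out.println("pushConfig.sh exited with " + rc);
  }
}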
+ 0 - 50
hadoop-common-project/hadoop-common/src/test/system/validation/org/apache/hadoop/util/TestSSHRemoteExecution.java

@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.util;
-
-import static org.junit.Assert.assertEquals;
-import org.junit.Test;
-
-public class TestSSHRemoteExecution {
-  
-  @Test
-  /**
-   * Method: executeCommand(String remoteHostName, String user, String  command)
-   */
-  public void testExecuteCommandForRemoteHostNameUserCommand() throws Exception {
-    String command = "ls -l /bin";
-    SSHRemoteExecution sshRE = new SSHRemoteExecution();
-    sshRE.executeCommand("localhost", null, command);
-    System.out.println(sshRE.getOutput());
-    assertEquals("Exit code is expected to be 0", 0, sshRE.getExitCode());
-    assertEquals("Mismatched command string", command, sshRE.getCommandString());
-  }
-
-  @Test
-  /**
-   * Method: getHomeDir()
-   */
-  public void testGetHomeDir() throws Exception {
-    SSHRemoteExecution sshRE = new SSHRemoteExecution();
-    String ret = sshRE.getHomeDir();
-    assertEquals(System.getProperty("user.home"),
-      ret + System.getProperty("file.separator") +
-        System.getProperty("user.name"));
-  }
-}

+ 0 - 63
hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/HDFSPolicyProviderAspect.aj

@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.util.ArrayList;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.test.system.DaemonProtocol;
-import org.apache.hadoop.hdfs.test.system.DNProtocol;
-import org.apache.hadoop.hdfs.test.system.NNProtocol;
-import org.apache.hadoop.security.authorize.Service;
-import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
-
-/**
- * This aspect adds two HDFS Herriot-specific protocols to the list of
- * 'authorized' Herriot protocols.
- * Protocol descriptors, i.e. 'security.nn.protocol.acl', have to be added to
- * <code>hadoop-policy.xml</code> if present.
- */
-public privileged aspect HDFSPolicyProviderAspect {
-  private static final Log LOG = LogFactory
-      .getLog(HDFSPolicyProviderAspect.class);
-
-  ArrayList<Service> herriotHDFSServices = null;
-
-  pointcut updateHDFSServices() :
-    execution (public Service[] HDFSPolicyProvider.getServices());
-
-  Service[] around() : updateHDFSServices () {
-    herriotHDFSServices = new ArrayList<Service>();
-    for (Service s : HDFSPolicyProvider.hdfsServices) {
-      LOG.debug("Copying configured protocol to "
-          + s.getProtocol().getCanonicalName());
-      herriotHDFSServices.add(s);
-    }
-    herriotHDFSServices.add(new Service("security.daemon.protocol.acl",
-        DaemonProtocol.class));
-    herriotHDFSServices.add(new Service("security.nn.protocol.acl",
-        NNProtocol.class));
-    herriotHDFSServices.add(new Service("security.dn.protocol.acl",
-        DNProtocol.class));
-    final Service[] retArray = herriotHDFSServices
-        .toArray(new Service[herriotHDFSServices.size()]);
-    LOG.debug("Number of configured protocols to return: " + retArray.length);
-    return retArray;
-  }
-}

+ 0 - 70
hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/datanode/DataNodeAspect.aj

@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.datanode;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.AbstractList;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.test.system.DNProtocol;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.system.DaemonProtocol;
-import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
-
-public privileged aspect DataNodeAspect {
-  declare parents : DataNode implements DNProtocol;
-
-  public Configuration DataNode.getDaemonConf() {
-    return super.getConf();
-  }
-
-  pointcut dnConstructorPointcut(Configuration conf, AbstractList<File> dirs,
-      SecureResources resources) :
-    call(DataNode.new(Configuration, AbstractList<File>, SecureResources))
-    && args(conf, dirs, resources);
-
-  after(Configuration conf, AbstractList<File> dirs, SecureResources resources)
-    returning (DataNode datanode):
-    dnConstructorPointcut(conf, dirs, resources) {
-    try {
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      datanode.setUser(ugi.getShortUserName());
-    } catch (IOException e) {
-      datanode.LOG.warn("Unable to get the user information for the " +
-          "DataNode");
-    }
-    datanode.setReady(true);
-  }
-
-  pointcut getVersionAspect(String protocol, long clientVersion) :
-    execution(public long DataNode.getProtocolVersion(String ,
-      long) throws IOException) && args(protocol, clientVersion);
-
-  long around(String protocol, long clientVersion) :
-    getVersionAspect(protocol, clientVersion) {
-    if(protocol.equals(DaemonProtocol.class.getName())) {
-      return DaemonProtocol.versionID;
-    } else if(protocol.equals(DNProtocol.class.getName())) {
-      return DNProtocol.versionID;
-    } else {
-      return proceed(protocol, clientVersion);
-    }
-  }
-}

+ 0 - 77
hadoop-hdfs-project/hadoop-hdfs/src/test/system/aop/org/apache/hadoop/hdfs/server/namenode/NameNodeAspect.aj

@@ -1,77 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.test.system.NNProtocol;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.test.system.DaemonProtocol;
-
-public privileged aspect NameNodeAspect {
-  declare parents : NameNode implements NNProtocol;
-
-  // NameNode doesn't store a copy of its configuration
-  // because it can be changed over the life cycle of the object.
-  // So, an exposed reference needs to be added and updated after
-  // new NameNode(Configuration conf) is complete.
-  Configuration NameNode.configRef = null;
-
-  // Method simply assigns a reference to the NameNode configuration object
-  void NameNode.setRef (Configuration conf) {
-    if (configRef == null)
-      configRef = conf;
-  }
-
-  public Configuration NameNode.getDaemonConf() {
-    return configRef;
-  }
-
-  pointcut nnConstructorPointcut(Configuration conf) :
-    call(NameNode.new(Configuration)) && args(conf);
-
-  after(Configuration conf) returning (NameNode namenode):
-    nnConstructorPointcut(conf) {
-    try {
-      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-      namenode.setUser(ugi.getShortUserName());
-    } catch (IOException e) {
-      namenode.LOG.warn("Unable to get the user information for the " +
-          "Jobtracker");
-    }
-    namenode.setRef(conf);
-    namenode.setReady(true);
-  }
-
-  pointcut getVersionAspect(String protocol, long clientVersion) :
-    execution(public long NameNode.getProtocolVersion(String ,
-      long) throws IOException) && args(protocol, clientVersion);
-
-  long around(String protocol, long clientVersion) :
-    getVersionAspect(protocol, clientVersion) {
-    if(protocol.equals(DaemonProtocol.class.getName())) {
-      return DaemonProtocol.versionID;
-    } else if(protocol.equals(NNProtocol.class.getName())) {
-      return NNProtocol.versionID;
-    } else {
-      return proceed(protocol, clientVersion);
-    }
-  }
-}

+ 0 - 147
hadoop-hdfs-project/hadoop-hdfs/src/test/system/conf/system-test-hdfs.xml

@@ -1,147 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-<!-- Mandatory properties that are to be set and uncommented before running the tests -->
-
-<property>
-  <name>test.system.hdrc.hadoophome</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/share/hadoop-current</value>
-  <description> This is the path to the home directory of the hadoop deployment.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.hadoopconfdir</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop</value>
-  <description> This is the path to the configuration directory of the hadoop
-  cluster that is deployed.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.dn.hostfile</name>
-  <value>slaves.localcopy.txt</value>
-  <description> File name containing the hostnames where the DataNodes are running.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdfs.clusterprocess.impl.class</name>
-  <value>org.apache.hadoop.hdfs.test.system.HDFSCluster$HDFSProcessManager</value>
-  <description>
-  Cluster process manager for the HDFS subsystem of the cluster. The value
-  org.apache.hadoop.hdfs.test.system.HDFSCluster$MultiUserHDFSProcessManager can
-  be used to enable multi user support.
-  </description>
-</property>
-
-<property>
-   <name>test.system.hdrc.deployed.scripts.dir</name>
-   <value>./src/test/system/scripts</value>
-   <description>
-     This directory hosts the scripts in the deployed location where
-     the system test client runs.
-   </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.hadoopnewconfdir</name>
-  <value>$(TO_DO_GLOBAL_TMP_DIR)/newconf</value>
-  <description>
-  The directory to which the new config files will be copied on all
-  the clusters.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.suspend.cmd</name>
-  <value>kill -SIGSTOP</value>
-  <description>
-    Command for suspending the given process.
-  </description>
-</property>
-
-<property>
-  <name>test.system.hdrc.resume.cmd</name>
-  <value>kill -SIGCONT</value>
-  <description>
-  Command for resuming the given suspended process.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.hadoop.local.confdir</name>
-  <value>$(TO_DO_GLOBAL_TMP_DIR)/localconf</value>
-  <description>
-    A local directory where a new config file is placed before
-    being pushed into new config location on the cluster.
-  </description>
-</property>
-
-<!-- Mandatory keys to be set for the multi user support to be enabled.  -->
-
-<property>
-  <name>test.system.hdfs.clusterprocess.impl.class</name>
-  <value>org.apache.hadoop.hdfs.test.system.HDFSCluster$MultiUserHDFSProcessManager</value>
-  <description>
-    Enabling multi-user based cluster process manager.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.multi-user.list.path</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop/proxyusers</value>
-  <description>
-  Multi user list for creating the proxy users.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.multi-user.binary.path</name>
-  <value>$(TO_DO_HADOOP_INSTALL)/conf/hadoop/runAs</value>
-  <description>
-    Local file system path on gate way to cluster-controller binary including the binary name.
-    To build the binary the following commands need to be executed:
-     % ant run-as -Drun-as.hadoop.home.dir=(HADOOP_PREFIX of setup cluster)
-     % cp build-fi/system/c++-build/runAs test.system.hdrc.multi-user.binary.path
-    The location of the binary on the cluster is an important security precaution.
-    The binary should be owned by root, and the test user group permission should be
-    set in such a way that it can be executed by the test user. Example usage would be:
-     % sudo chown root binary
-     % sudo chmod 6511 binary
-    Change permission appropriately to make it more secure.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.multi-user.managinguser.namenode</name>
-  <value>*</value>
-  <description>
-    User for managing the particular daemon. Note that these users should
-    also be present on the gateways. An example configuration for the above would be
-    key name = test.system.hdrc.multi-user.managinguser.namenode
-    key value = guest
-    Note that the daemon names are all lower case, corresponding to the hadoop-daemon.sh command.
-  </description>
-</property>
-<property>
-  <name>test.system.hdrc.multi-user.managinguser.datanode</name>
-  <value>*</value>
-</property>
- 
-</configuration>

+ 0 - 99
hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNClient.java

@@ -1,99 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-/**
- * Datanode client for system tests. The class assumes that the configuration
- * key {@code DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY} is set; only the
- * port portion of the address is used.
- */
-public class DNClient extends HDFSDaemonClient<DNProtocol> {
-
-  DNProtocol proxy;
-  private static final String HADOOP_DATANODE_OPTS_ENV = "HADOOP_DATANODE_OPTS";
-
-  public DNClient(Configuration conf, RemoteProcess process) throws IOException {
-    super(conf, process);
-  }
-
-  @Override
-  public void connect() throws IOException {
-    if (isConnected()) {
-      return;
-    }
-    String sockAddrStr = getConf().get(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY);
-    if (sockAddrStr == null) {
-      throw new IllegalArgumentException("Datenode IPC address is not set."
-          + "Check if " + DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY
-          + " is configured.");
-    }
-    String[] splits = sockAddrStr.split(":");
-    if (splits.length != 2) {
-      throw new IllegalArgumentException(
-          "Datanode IPC address is not correctly configured");
-    }
-    String port = splits[1];
-    String sockAddr = getHostName() + ":" + port;
-    InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr);
-    proxy = (DNProtocol) RPC.getProxy(DNProtocol.class, DNProtocol.versionID,
-        bindAddr, getConf());
-    setConnected(true);
-  }
-
-  @Override
-  public void disconnect() throws IOException {
-    RPC.stopProxy(proxy);
-    setConnected(false);
-  }
-
-  @Override
-  protected DNProtocol getProxy() {
-    return proxy;
-  }
-
-  public Configuration getDatanodeConfig() throws IOException {
-    return getProxy().getDaemonConf();
-  }
-
-  @Override
-  public String getHadoopOptsEnvName() {
-    return HADOOP_DATANODE_OPTS_ENV;
-  }
-
-  /**
-   * Concrete implementation of abstract super class method
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the given attribute
-   * @throws IOException is thrown in case of communication errors
-   */
-  @Override
-  public Object getDaemonAttribute (String attributeName) throws IOException {
-    return getJmxAttribute("DataNode", "DataNodeInfo", attributeName);
-  }
-}

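A minimal sketch of how the removed DNClient was meant to be used, assuming a RemoteProcess handle obtained from a cluster process manager (in practice clients are created through HDFSCluster rather than constructed directly):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.test.system.DNClient;
import org.apache.hadoop.test.system.process.RemoteProcess;

public class DNClientExample {
  // `process` is assumed to come from the configured ClusterProcessManager.
  static void dumpDatanodeInfo(Configuration conf, RemoteProcess process)
      throws IOException {
    DNClient dn = new DNClient(conf, process);
    dn.connect();
    try {
      // Fetch the remote daemon's configuration over the DNProtocol proxy.
      Configuration remoteConf = dn.getDatanodeConfig();
      System.out.println("Data dirs: " + remoteConf.get("dfs.datanode.data.dir"));
      // JMX-backed attribute lookup, as implemented by getDaemonAttribute().
      System.out.println("VolumeInfo: " + dn.getDaemonAttribute("VolumeInfo"));
    } finally {
      dn.disconnect();
    }
  }
}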
+ 0 - 36
hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/DNProtocol.java

@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.test.system.DaemonProtocol;
-
-/**
- * Client side API exposed from Datanode.
- * Actual implementations are likely to be injected
- *
- * The protocol has to be annotated so KerberosInfo can be filled in during
- * creation of an ipc.Client connection.
- */
-@KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
-public interface DNProtocol extends DaemonProtocol {
-  public static final long versionID = 1L;
-}

+ 0 - 149
hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSCluster.java

@@ -1,149 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Iterator;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.test.system.AbstractDaemonClient;
-import org.apache.hadoop.test.system.AbstractDaemonCluster;
-import org.apache.hadoop.test.system.process.ClusterProcessManager;
-import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster;
-import org.apache.hadoop.test.system.process.MultiUserHadoopDaemonRemoteCluster;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-import org.apache.hadoop.test.system.process.HadoopDaemonRemoteCluster.HadoopDaemonInfo;
-
-public class HDFSCluster extends AbstractDaemonCluster {
-
-  static {
-    Configuration.addDefaultResource("hdfs-site.xml");
-  }
-
-  private static final Log LOG = LogFactory.getLog(HDFSCluster.class);
-  public static final String CLUSTER_PROCESS_MGR_IMPL =
-    "test.system.hdfs.clusterprocess.impl.class";
-
-  private HDFSCluster(Configuration conf, ClusterProcessManager rCluster)
-    throws IOException {
-    super(conf, rCluster);
-  }
-
-  /**
-   * Key used to point to the file containing the hostnames of the datanodes
-   */
-  public static final String CONF_HADOOP_DN_HOSTFILE_NAME =
-    "test.system.hdrc.dn.hostfile";
-
-  private static List<HadoopDaemonInfo> hdfsDaemonInfos;
-
-  private static String nnHostName;
-  private static String DN_hostFileName;
-
-  protected enum Role {NN, DN}
-
-  @Override
-  protected AbstractDaemonClient
-    createClient(RemoteProcess process) throws IOException {
-    Enum<?> pRole = process.getRole();
-    if (Role.NN.equals(pRole)) {
-      return createNNClient(process);
-    } else if (Role.DN.equals(pRole)) {
-      return createDNClient(process);
-    } else throw new IOException("Role " + pRole +
-      " is not supported by HDFSCluster");
-  }
-
-  protected DNClient createDNClient(RemoteProcess dnDaemon) throws IOException {
-    return new DNClient(getConf(), dnDaemon);
-  }
-
-  protected NNClient createNNClient(RemoteProcess nnDaemon) throws IOException {
-    return new NNClient(getConf(), nnDaemon);
-  }
-
-  public NNClient getNNClient () {
-    Iterator<AbstractDaemonClient> iter = getDaemons().get(Role.NN).iterator();
-    return (NNClient) iter.next();
-  }
-
-  public List<DNClient> getDNClients () {
-    return (List) getDaemons().get(Role.DN);
-  }
-
-  public DNClient getDNClient (String hostname) {
-    for (DNClient dnC : getDNClients()) {
-      if (dnC.getHostName().equals(hostname))
-        return dnC;
-    }
-    return null;
-  }
-
-  public static class HDFSProcessManager extends HadoopDaemonRemoteCluster {
-    public HDFSProcessManager() {
-      super(hdfsDaemonInfos);
-    }
-  }
-
-  public static class MultiUserHDFSProcessManager
-      extends MultiUserHadoopDaemonRemoteCluster {
-    public MultiUserHDFSProcessManager() {
-      super(hdfsDaemonInfos);
-    }
-  }
-
-
-  public static HDFSCluster createCluster(Configuration conf) throws Exception {
-    conf.addResource("system-test.xml");
-    String sockAddrStr = FileSystem.getDefaultUri(conf).getAuthority();
-    if (sockAddrStr == null) {
-      throw new IllegalArgumentException("Namenode IPC address is not set");
-    }
-    String[] splits = sockAddrStr.split(":");
-    if (splits.length != 2) {
-      throw new IllegalArgumentException(
-          "Namenode report IPC is not correctly configured");
-    }
-    nnHostName = splits[0];
-    DN_hostFileName = conf.get(CONF_HADOOP_DN_HOSTFILE_NAME, "slaves");
-
-    hdfsDaemonInfos = new ArrayList<HadoopDaemonInfo>();
-    hdfsDaemonInfos.add(new HadoopDaemonInfo("namenode", 
-        Role.NN, Arrays.asList(new String[]{nnHostName})));
-    hdfsDaemonInfos.add(new HadoopDaemonInfo("datanode", 
-        Role.DN, DN_hostFileName));
-    
-    String implKlass = conf.get(CLUSTER_PROCESS_MGR_IMPL);
-    if (implKlass == null || implKlass.isEmpty()) {
-      implKlass = HDFSCluster.HDFSProcessManager.class.getName();
-    }
-    Class<ClusterProcessManager> klass =
-      (Class<ClusterProcessManager>) Class.forName(implKlass);
-    ClusterProcessManager clusterProcessMgr = klass.newInstance();
-    LOG.info("Created ClusterProcessManager as " + implKlass);
-    clusterProcessMgr.init(conf);
-    return new HDFSCluster(conf, clusterProcessMgr);
-  }
-}

+ 0 - 46
hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/HDFSDaemonClient.java

@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.test.system.AbstractDaemonClient;
-import org.apache.hadoop.test.system.DaemonProtocol;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-public abstract class HDFSDaemonClient<PROXY extends DaemonProtocol>
-  extends AbstractDaemonClient<PROXY> {
-
-  public HDFSDaemonClient(Configuration conf, RemoteProcess process)
-      throws IOException {
-    super(conf, process);
-  }
-
-  public String[] getHDFSDataDirs() throws IOException {
-    return getProxy().getDaemonConf().getStrings(
-        DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
-  }
-
-  public String getHDFSNameDirs() throws IOException {
-    return getProxy().getDaemonConf().getStrings(
-        DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY)[0];
-  }
-}

+ 0 - 88
hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNClient.java

@@ -1,88 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.test.system.process.RemoteProcess;
-
-public class NNClient extends HDFSDaemonClient<NNProtocol> {
-  
-  NNProtocol proxy;
-  private static final String HADOOP_NAMENODE_OPTS_ENV = "HADOOP_NAMENODE_OPTS";
-
-  public NNClient(Configuration conf, RemoteProcess process) throws IOException {
-    super(conf, process);
-  }
-
-  @Override
-  public void connect() throws IOException {
-    if (isConnected())
-      return;
-    String sockAddrStr = FileSystem.getDefaultUri(getConf()).getAuthority();
-    if (sockAddrStr == null) {
-      throw new IllegalArgumentException("Namenode IPC address is not set");
-    }
-    String[] splits = sockAddrStr.split(":");
-    if (splits.length != 2) {
-      throw new IllegalArgumentException(
-          "Namenode report IPC is not correctly configured");
-    }
-    String port = splits[1];
-    String sockAddr = getHostName() + ":" + port;
-
-    InetSocketAddress bindAddr = NetUtils.createSocketAddr(sockAddr);
-    proxy = (NNProtocol) RPC.getProxy(NNProtocol.class, NNProtocol.versionID,
-        bindAddr, getConf());
-    setConnected(true);
-  }
-
-  @Override
-  public void disconnect() throws IOException {
-    RPC.stopProxy(proxy);
-    setConnected(false);
-  }
-
-  @Override
-  protected NNProtocol getProxy() {
-    return proxy;
-  }
-
-  @Override
-  public String getHadoopOptsEnvName() {
-    return HADOOP_NAMENODE_OPTS_ENV;
-  }
-
-  /**
-   * Concrete implementation of abstract super class method
-   * @param attributeName name of the attribute to be retrieved
-   * @return Object value of the given attribute
-   * @throws IOException is thrown in case of communication errors
-   */
-  @Override
-  public Object getDaemonAttribute (String attributeName) throws IOException {
-    return getJmxAttribute("NameNode", "NameNodeInfo", attributeName);
-  }
-}

+ 0 - 36
hadoop-hdfs-project/hadoop-hdfs/src/test/system/java/org/apache/hadoop/hdfs/test/system/NNProtocol.java

@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.test.system;
-
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.test.system.DaemonProtocol;
-
-/**
- * Client side API exposed from Namenode.
- * Actual implementations are likely to be injected
- *
- * The protocol has to be annotated so KerberosInfo can be filled in during
- * creation of an ipc.Client connection.
- */
-@KerberosInfo(
-    serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
-public interface NNProtocol extends DaemonProtocol {
-  public static final long versionID = 1L;
-}

+ 0 - 86
hadoop-hdfs-project/hadoop-hdfs/src/test/system/test/org/apache/hadoop/hdfs/TestHL040.java

@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.test.system.DNClient;
-import org.apache.hadoop.hdfs.test.system.HDFSCluster;
-import org.apache.hadoop.hdfs.test.system.NNClient;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mortbay.util.ajax.JSON;
-
-public class TestHL040 {
-  private HDFSCluster cluster = null;
-  private static final Log LOG = LogFactory.getLog(TestHL040.class);
-
-  public TestHL040() throws Exception {
-  }
-
-  @Before
-  public void setupUp() throws Exception {
-    cluster = HDFSCluster.createCluster(new Configuration());
-    cluster.setUp();
-  }
-
-  @After
-  public void tearDown() throws Exception {
-    cluster.tearDown();
-  }
-
-  @Test
-  public void testConnect() throws IOException {
-    LOG.info("Staring TestHL040: connecting to the HDFSCluster ");
-    LOG.info("================ Getting namenode info ================");
-    NNClient dfsMaster = cluster.getNNClient();
-    LOG.info("Process info of namenode " + dfsMaster.getHostName() + " is: " +
-        dfsMaster.getProcessInfo());
-    LOG.info("================ Getting datanode info ================");
-    Collection<DNClient> clients = cluster.getDNClients();
-    for (DNClient dnC : clients) {
-      LOG.info("Process info of datanode " + dnC.getHostName() + " is: " +
-          dnC.getProcessInfo());
-      Assert.assertNotNull("Datanode process info isn't suppose to be null",
-          dnC.getProcessInfo());
-      LOG.info("Free space " + getFreeSpace(dnC));
-    }
-  }
-
-  private long getFreeSpace(DNClient dnC) throws IOException {
-    Object volObj = dnC.getDaemonAttribute("VolumeInfo");
-    Assert.assertNotNull("Attribute value is expected to be not null", volObj);
-    LOG.debug("Got object: " + volObj);
-    Map volInfoMap = (Map) JSON.parse(volObj.toString());
-    long totalFreeSpace = 0L;
-    for (Object key : volInfoMap.keySet()) {
-      Map attrMap = (Map) volInfoMap.get(key);
-      long freeSpace = (Long) attrMap.get("freeSpace");
-      totalFreeSpace += freeSpace;
-    }
-    return totalFreeSpace;
-  }
-}

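A small self-contained sketch of the VolumeInfo layout that getFreeSpace() above expects; the volume paths and values are hypothetical, inferred from the parsing code, which only requires a map of volumes each carrying a numeric freeSpace attribute:

import java.util.Map;

import org.mortbay.util.ajax.JSON;

public class VolumeInfoParseExample {
  public static void main(String[] args) {
    // Hypothetical sample of the JMX VolumeInfo attribute's JSON form.
    String sample = "{\"/data/1/dfs\":{\"freeSpace\":1073741824},"
        + "\"/data/2/dfs\":{\"freeSpace\":536870912}}";
    Map volInfoMap = (Map) JSON.parse(sample);
    long totalFreeSpace = 0L;
    for (Object key : volInfoMap.keySet()) {
      Map attrMap = (Map) volInfoMap.get(key);
      totalFreeSpace += (Long) attrMap.get("freeSpace");
    }
    System.out.println("Total free space: " + totalFreeSpace);
  }
}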
+ 0 - 231
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/GridmixSystemTestCase.java

@@ -1,231 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JTClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobSubmission;
-import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobVerification;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobStory;
-import org.apache.hadoop.tools.rumen.ZombieJob;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.mapreduce.JobID;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-
-import java.util.Iterator;
-import java.util.Map;
-import java.util.List;
-import java.util.Set;
-import java.io.IOException;
-import org.junit.Assert;
-
-/**
- * Run and verify the Gridmix jobs for a given trace.
- */
-public class GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(GridmixSystemTestCase.class);
-  protected static Configuration  conf = new Configuration();
-  protected static MRCluster cluster;
-  protected static int cSize;
-  protected static JTClient jtClient;
-  protected static JTProtocol rtClient;
-  protected static Path gridmixDir;
-  protected static Map<String, String> map;
-  protected static GridmixJobSubmission gridmixJS;
-  protected static GridmixJobVerification gridmixJV;
-  protected static List<JobID> jobids;
-  
-  @BeforeClass
-  public static void before() throws Exception {
-    String [] excludeExpList = {"java.net.ConnectException", 
-                                "java.io.IOException"};
-    cluster = MRCluster.createCluster(conf);
-    cluster.setExcludeExpList(excludeExpList);
-    cluster.setUp();
-    cSize = cluster.getTTClients().size();
-    jtClient = cluster.getJTClient();
-    rtClient = jtClient.getProxy();
-    gridmixDir = new Path("herriot-gridmix");
-    UtilsForGridmix.createDirs(gridmixDir, rtClient.getDaemonConf());
-    map = UtilsForGridmix.getMRTraces(rtClient.getDaemonConf());
-  }
-
-  @AfterClass
-  public static void after() throws Exception {
-    UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
-    org.apache.hadoop.fs.FileUtil.fullyDelete(new java.io.File(System.
-        getProperty("java.io.tmpdir") + "/gridmix-st/"));
-    cluster.tearDown();
-
-    /* Clean up the proxy user directories if gridmix ran with
-      RoundRobinUserResolver mode.*/
-    if (gridmixJV != null 
-       && gridmixJV.getJobUserResolver().contains("RoundRobin")) {
-       List<String> proxyUsers = 
-           UtilsForGridmix.listProxyUsers(gridmixJS.getJobConf(),
-           UserGroupInformation.getLoginUser().getShortUserName());
-       for(int index = 0; index < proxyUsers.size(); index++){
-         UtilsForGridmix.cleanup(new Path("hdfs:///user/" + 
-            proxyUsers.get(index)), 
-            rtClient.getDaemonConf());
-       }
-    }
-  }
-  
-  /**
-   * Run the gridmix with specified runtime parameters and
-   * verify the jobs after completion of execution.
-   * @param runtimeValues - common runtime arguments for gridmix.
-   * @param otherValues - test specific runtime arguments for gridmix.
-   * @param tracePath - path of a trace file.
-   * @throws Exception - if an exception occurs.
-   */
-  public static void runGridmixAndVerify(String[] runtimeValues, 
-     String [] otherValues, String tracePath) throws Exception {
-     runGridmixAndVerify(runtimeValues, otherValues, tracePath , 
-         GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Run the gridmix with specified runtime parameters and
-   * verify the jobs after completion of execution.
-   * @param runtimeValues - common runtime arguments for gridmix.
-   * @param otherValues - test specific runtime arguments for gridmix.
-   * @param tracePath - path of a trace file.
-   * @param mode - 1 for data generation, 2 for running the gridmix and 3 for
-   * data generation followed by running the gridmix.
-   * @throws Exception - if an exception occurs.
-   */
-  public static void runGridmixAndVerify(String [] runtimeValues, 
-      String [] otherValues, String tracePath, int mode) throws Exception {
-    List<JobID> jobids = runGridmix(runtimeValues, otherValues, mode);
-    gridmixJV = new GridmixJobVerification(new Path(tracePath), 
-                                           gridmixJS.getJobConf(), jtClient);
-    gridmixJV.verifyGridmixJobsWithJobStories(jobids);  
-  }
-
-  /**
-   * Run the gridmix with a user-specified mode.
-   * @param runtimeValues - common runtime parameters for gridmix.
-   * @param otherValues - test specific runtime parameters for gridmix.
-   * @param mode - 1 for data generation, 2 for running the gridmix and 3 for
-   * data generation followed by running the gridmix.
-   * @return - list of gridmix job ids.
-   * @throws Exception - if an exception occurs.
-   */
-  public static List<JobID> runGridmix(String[] runtimeValues, 
-     String[] otherValues, int mode) throws Exception {
-    gridmixJS = new GridmixJobSubmission(rtClient.getDaemonConf(),
-       jtClient, gridmixDir);
-    gridmixJS.submitJobs(runtimeValues, otherValues, mode);
-    List<JobID> jobids = 
-        UtilsForGridmix.listGridmixJobIDs(jtClient.getClient(), 
-                                          gridmixJS.getGridmixJobCount());
-    return jobids;
-  }
-  
-  /**
-   * Get the trace file based on a given regular expression.
-   * @param regExp - trace file pattern.
-   * @return - trace file as string.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public static String getTraceFile(String regExp) throws IOException {
-    List<String> listTraces = UtilsForGridmix.listMRTraces(
-        rtClient.getDaemonConf());
-    Iterator<String> ite = listTraces.iterator();
-    while(ite.hasNext()) {
-      String traceFile = ite.next();
-      if (traceFile.indexOf(regExp)>=0) {
-        return traceFile;
-      }
-    }
-    return null;
-  }
-
-  /**
-   * Validate the task memory parameters.
-   * @param tracePath - trace file.
-   * @param isTraceHasHighRamJobs - true if the trace has high-ram job(s),
-   *                                otherwise false
-   */
-  @SuppressWarnings("deprecation")
-  public static void validateTaskMemoryParamters(String tracePath,
-      boolean isTraceHasHighRamJobs) throws IOException {
-    if (isTraceHasHighRamJobs) {
-      GridmixJobStory gjs = new GridmixJobStory(new Path(tracePath),
-                                                rtClient.getDaemonConf());
-      Set<JobID> jobids = gjs.getZombieJobs().keySet();
-      boolean isHighRamFlag = false;
-      for (JobID jobid :jobids) {
-        ZombieJob zombieJob = gjs.getZombieJobs().get(jobid);
-        JobConf origJobConf = zombieJob.getJobConf();
-        int origMapFactor =
-            GridmixJobVerification.getMapFactor(origJobConf);
-        int origReduceFactor =
-            GridmixJobVerification.getReduceFactor(origJobConf);
-        if (origMapFactor >= 2 || origReduceFactor >= 2) {
-          isHighRamFlag = true;
-          long TaskMapMemInMB =
-              GridmixJobVerification.getScaledTaskMemInMB(
-                      GridMixConfig.JOB_MAP_MEMORY_MB,
-                      GridMixConfig.CLUSTER_MAP_MEMORY,
-                      origJobConf, rtClient.getDaemonConf());
-
-          long TaskReduceMemInMB =
-              GridmixJobVerification.getScaledTaskMemInMB(
-                      GridMixConfig.JOB_REDUCE_MEMORY_MB,
-                      GridMixConfig.CLUSTER_REDUCE_MEMORY,
-                      origJobConf, rtClient.getDaemonConf());
-          long taskMapLimitInMB =
-              conf.getLong(GridMixConfig.CLUSTER_MAX_MAP_MEMORY,
-                           JobConf.DISABLED_MEMORY_LIMIT);
-
-          long taskReduceLimitInMB =
-              conf.getLong(GridMixConfig.CLUSTER_MAX_REDUCE_MEMORY,
-                           JobConf.DISABLED_MEMORY_LIMIT);
-
-          GridmixJobVerification.verifyMemoryLimits(TaskMapMemInMB,
-                                                    taskMapLimitInMB);
-          GridmixJobVerification.verifyMemoryLimits(TaskReduceMemInMB,
-                                                    taskReduceLimitInMB);
-        }
-      }
-      Assert.assertTrue("Trace doesn't have atleast one high ram job.",
-                        isHighRamFlag);
-    }
-  }
-
-  public static boolean isLocalDistCache(String fileName, String userName, 
-                                         boolean visibility) {
-    return DistributedCacheEmulator.isLocalDistCacheFile(fileName, 
-                                                         userName, visibility);
-  }
-}
-

+ 0 - 108
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithCustomInterval.java

@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test cpu emulation with a custom interval for gridmix jobs 
- * against different input data, submission policies and user resolvers.
- * Verify the cpu resource metrics of both maps and reduces phase of
- * Gridmix jobs with their corresponding original job in the input trace.
- */
-public class TestCPUEmulationForMapsAndReducesWithCustomInterval 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-          LogFactory.getLog(TestCPUEmulationForMapsAndReducesWithCustomInterval.class);
-  int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
-
- /**
   * Generate compressed input and run {@link Gridmix} by turning on the 
-   * cpu emulation feature with a custom setting. The {@link Gridmix} 
-   * should use the following runtime parameters.
-   * Submission Policy : STRESS, UserResolver: RoundRobinUserResolver. 
-   * Once the {@link Gridmix} run is complete, verify cpu resource metrics of 
-   * {@link Gridmix} jobs with their corresponding original job in a trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void  testCPUEmulationForMapsAndReducesWithCompressedInputCase7() 
-      throws Exception {
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("cpu_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              RoundRobinUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.35F",
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); 
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on the 
-   * cpu emulation feature with a custom setting. The {@link Gridmix} 
-   * should use the following runtime parameters.
-   * Submission Policy : SERIAL, UserResolver: SubmitterUserResolver 
-   * Once the {@link Gridmix} run is complete, verify cpu resource metrics of 
-   * {@link Gridmix} jobs with their corresponding original job in a trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void  testCPUEmulatonForMapsAndReducesWithUncompressedInputCase8() 
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("cpu_emul_case2");
-    Assert.assertNotNull("Trace file not found.", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                SubmitterUserResolver.class.getName(), 
-                "SERIAL", 
-                inputSizeInMB + "m",
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.4F",
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN     };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); 
-  }
-}
-
-

+ 0 - 105
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsAndReducesWithDefaultInterval.java

@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test cpu emulation with default interval for gridmix jobs 
- * against different input data, submission policies and user resolvers.
- * Verify the cpu resource metrics for both maps and reduces of
- * Gridmix jobs with their corresponding original job in the input trace.
- */
-public class TestCPUEmulationForMapsAndReducesWithDefaultInterval 
-                                            extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-          LogFactory.getLog(
-              TestCPUEmulationForMapsAndReducesWithDefaultInterval.class);
-  int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
-
- /**
-   * Generate compressed input and run {@link Gridmix} by turning on the 
-   * cpu emulation feature with default setting. The {@link Gridmix} 
-   * should use the following runtime parameters.
-   * Submission Policy : REPLAY, UserResolver: RoundRobinUserResolver. 
-   * Once the {@link Gridmix} run is complete, verify cpu resource metrics of 
-   * {@link Gridmix} jobs with their corresponding original jobs in the trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void  testCPUEmulationForMapsAndReducesWithCompressedInputCase5() 
-      throws Exception {
-    final long inputSizeInMB = 7168;
-    String tracePath = getTraceFile("cpu_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              RoundRobinUserResolver.class.getName(),
-              "REPLAY",
-              inputSizeInMB + "m",
-              "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", JobContext.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); 
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on the 
-   * cpu emulation feature with default settings. The {@link Gridmix} 
-   * should use the following runtime parameters.
-   * Submission Policy: STRESS, UserResolver: SubmitterUserResolver. 
-   * Once the Gridmix run is complete, verify cpu resource metrics of 
-   * {@link Gridmix} jobs with their corresponding original jobs in the trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void  testCPUEmulatonForMapsAndReducesWithUncompressedInputCase6() 
-      throws Exception {
-    final long inputSizeInMB = cSize * 400;
-    String tracePath = getTraceFile("cpu_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                SubmitterUserResolver.class.getName(), 
-                "STRESS",
-                inputSizeInMB + "m",
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", JobContext.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN     };
-    
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); 
-  }
-}
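A note on the shared convention: every test in this removed suite builds the same positional runtimeValues array. A minimal sketch of that convention, with hypothetical placeholder paths (the real trace and proxy-users locations are resolved by the harness and are not part of this diff):

    // Positions: job type, user resolver class, submission policy,
    // optional data-generation size, optional proxy-users file (only when
    // RoundRobinUserResolver is used), and finally the input trace path.
    String[] runtimeValues = {
        "LOADJOB",                               // synthetic job type
        RoundRobinUserResolver.class.getName(),  // user resolver
        "REPLAY",                                // submission policy
        "7168m",                                 // data to generate
        "file:///tmp/proxyusers",                // hypothetical proxy-users file
        "file:///tmp/trace.json.gz"              // hypothetical trace path
    };

UtilsForGridmix.runGridmixJob presumably expands this array, together with the -D properties in otherArgs, into the actual Gridmix command line.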

+ 0 - 105
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithCustomInterval.java

@@ -1,105 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} cpu emulation with custom interval for 
- * gridmix jobs against different input data, submission policies and 
- * user resolvers. Verify the map phase cpu metrics of gridmix jobs 
- * against their original job in the trace. 
- */
-public class TestCPUEmulationForMapsWithCustomInterval 
-                                            extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestCPUEmulationForMapsWithCustomInterval.class);
-  int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
-
-  /**
-   * Generate compressed input and run {@link Gridmix} by turning on 
-   * cpu emulation feature with custom setting. The {@link Gridmix} should 
-   * use the following runtime parameters while running gridmix jobs.
-   * Submission Policy: STRESS, User Resolver Mode: SubmitterUserResolver.
-   * Once the {@link Gridmix} run is complete, verify the map phase cpu 
-   * resource metrics of {@link Gridmix} jobs with their corresponding 
-   * original jobs in the trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void  testCPUEmulatonForMapsWithCompressedInputCase3() 
-      throws Exception { 
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("cpu_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "STRESS",
-                               inputSizeInMB + "m",
-                               tracePath};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" +
-              GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.25F"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
-  }
-
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on 
-   * cpu emulation feature with custom settings. The {@link Gridmix} 
-   * should use the following runtime parameters while running gridmix jobs.
-   * Submission Policy: REPLAY, User Resolver Mode: RoundRobinUserResolver.
-   * Once {@link Gridmix} run is complete, verify the map phase cpu resource 
-   * metrics of {@link Gridmix} jobs with their corresponding jobs
-   * in the original trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testCPUEmulatonForMapsUnCompressedInputCase4() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 200;
-    String tracePath = getTraceFile("cpu_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-           {"LOADJOB",
-            RoundRobinUserResolver.class.getName(),
-            "REPLAY",
-            inputSizeInMB + "m",
-            "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-            tracePath};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-              GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
-        "-D", GridMixConfig.GRIDMIX_CPU_CUSTOM_INTERVAL + "=0.35F"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode);
-  }
-}
-

+ 0 - 103
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCPUEmulationForMapsWithDefaultInterval.java

@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} cpu emulation with default settings for 
- * gridmix jobs against different input data, submission policies and 
- * user resolvers. Verify the map phase cpu metrics of gridmix jobs 
- * against their original jobs in the trace. 
- */
-public class TestCPUEmulationForMapsWithDefaultInterval 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(TestCPUEmulationForMapsWithDefaultInterval.class);
-  int execMode = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue();
-
-  /**
-   * Generate compressed input and run {@link Gridmix} by turning on cpu 
-   * emulation feature with default settings. The {@link Gridmix} should 
-   * use the following runtime parameters while running the gridmix jobs.
-   * Submission Policy: STRESS, UserResolver: SubmitterUserResolver. 
-   * Once the {@link Gridmix} run is complete, verify map phase cpu metrics of 
-   * {@link Gridmix} jobs with their corresponding original job in the trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testCPUEmulatonForMapsWithCompressedInputCase1() 
-      throws Exception {
-    final long inputSizeInMB = 1024 * 6;
-    String tracePath = getTraceFile("cpu_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = { "LOADJOB", 
-                                SubmitterUserResolver.class.getName(), 
-                                "STRESS", 
-                                inputSizeInMB + "m", 
-                                tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, execMode); 
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} by turning on 
-   * cpu emulation feature with default settings. The {@link Gridmix} 
-   * should use the following runtime parameters while running Gridmix jobs.
-   * Submission Policy: REPLAY, UserResolver: RoundRobinUserResolver
-   * Once the Gridmix run is complete, verify cpu resource metrics of 
-   * {@link Gridmix} jobs with their corresponding original job in the trace.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testCPUEmulatonForMapsWithUnCompressedInputCase2() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 200;
-    String tracePath = getTraceFile("cpu_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              RoundRobinUserResolver.class.getName(),
-              "REPLAY",
-              inputSizeInMB + "m",
-              "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_CPU_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}

+ 0 - 96
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationEnableForAllTypesOfJobs.java

@@ -1,96 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the compression emulation for all the jobs in the trace 
- * irrespective of compressed inputs.
- */
-public class TestCompressionEmulationEnableForAllTypesOfJobs 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(
-          TestCompressionEmulationEnableForAllTypesOfJobs.class);
-
-  /**
-   *  Generate compressed input data and verify the compression emulation
-   *  for all the jobs in the trace irrespective of whether the original
-   *  job uses the compressed input or not. Also use the custom compression
-   *  ratios for map input, map output and reduce output.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testInputCompressionEmualtionEnableForAllJobsWithDefaultRatios() 
-      throws Exception { 
-    final long inputSizeInMB = 1024 * 6;
-    final String tracePath = getTraceFile("compression_case4_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "REPLAY",
-                                     inputSizeInMB + "m",
-                                     tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true",
-        "-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.46",
-        "-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.35",
-        "-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.36"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   *  Use existing compressed input data and turn off the compression 
-   *  emulation. Verify whether the jobs use the compression 
-   *  emulation or not.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testInputCompressionEmulationEnableForAllJobsWithCustomRatios() 
-      throws Exception { 
-     final String tracePath = getTraceFile("compression_case4_trace");
-     Assert.assertNotNull("Trace file has not found.", tracePath);
-     final String [] runtimeValues = {"LOADJOB",
-                                      SubmitterUserResolver.class.getName(),
-                                      "SERIAL",
-                                      tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }  
-}
-
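As a back-of-the-envelope illustration of what the ratio settings in the test above control (my arithmetic, not output from the harness):

    // With an input compression ratio of 0.46, a 6144 MB uncompressed
    // input should occupy roughly 6144 * 0.46, i.e. about 2826 MB, on
    // disk; the intermediate (0.35) and output (0.36) ratios scale map
    // output and reduce output the same way.
    long uncompressedInputMB = 6 * 1024;
    double inputCompressRatio = 0.46;
    long compressedInputMB = Math.round(uncompressedInputMB * inputCompressRatio);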

+ 0 - 98
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForCompressInAndUncompressOut.java

@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Assert;
-import org.junit.Test;
-/**
- * Verify the gridmix jobs' compression ratios of map input and 
- * intermediate input with default/custom ratios. Also verify 
- * whether the compressed output file format is enabled or not.
- *
- */
-public class TestCompressionEmulationForCompressInAndUncompressOut 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(
-          TestCompressionEmulationForCompressInAndUncompressOut.class);
-  final long inputSizeInMB = 1024 * 6;
-
-  /**
-   * Generate compressed input data and verify the compression ratios 
-   * of map input and map output against the default compression ratios, 
-   * and also verify whether the compressed output file format 
-   * is enabled or not.
-   * @throws Exception -if an error occurs.
-   */
-  @Test
-  public void testCompressionEmulationOfCompressedInputWithDefaultRatios() 
-      throws Exception {
-    final String tracePath = getTraceFile("compression_case2_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     inputSizeInMB + "m",
-                                     tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Use existing compressed input data and verify the compression ratios 
-   * of input and intermediate input against custom compression ratios 
-   * and also verify whether the compressed output file format is enabled or not.
-   * @throws Exception -if an error occurs.
-   */
-  @Test
-  public void testCompressionEmulationOfCompressedInputWithCustomRatios() 
-      throws Exception {
-    final String tracePath = getTraceFile("compression_case2_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     inputSizeInMB + "m",
-                                     tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
-        "-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.58",
-        "-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.42"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
-

+ 0 - 93
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestCompressionEmulationForUncompressInAndCompressOut.java

@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Assert;
-import org.junit.Test;
-/**
- * Verify the gridmix jobs' compression ratios of reduce output 
- * with default and custom ratios.
- */
-public class TestCompressionEmulationForUncompressInAndCompressOut
-   extends GridmixSystemTestCase { 
-   private static final Log LOG = 
-       LogFactory.getLog(
-           TestCompressionEmulationForUncompressInAndCompressOut.class);
-   final long inputSizeInMB = 1024 * 6;
-
-  /**
-   * Generate uncompressed input data and verify the compression ratios 
-   * of reduce output against default output compression ratio.
-   * @throws Exception -if an error occurs.
-   */
-  @Test
-  public void testCompressionEmulationOfCompressedOuputWithDefaultRatios() 
-      throws Exception { 
-    final String tracePath = getTraceFile("compression_case3_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "REPLAY",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Use existing uncompressed input data and verify the compression ratio 
-   * of reduce output against a custom output compression ratio, and also 
-   * verify the compressed output file format.
-   * @throws Exception -if an error occurs.
-   */
-  @Test
-  public void testCompressionEmulationOfCompressedOutputWithCustomRatios() 
-      throws Exception {
-    final String tracePath = getTraceFile("compression_case3_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
-    final String [] runtimeValues = { "LOADJOB",
-                                      SubmitterUserResolver.class.getName(),
-                                      "STRESS",
-                                      inputSizeInMB + "m",
-                                      tracePath };
-
-    final String [] otherArgs = { 
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.38"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}

+ 0 - 65
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestDisableGridmixEmulationOfHighRam.java

@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.GridmixJob;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run {@link Gridmix} with a high ram jobs trace by disabling the 
- * emulation of high ram and verify whether each {@link Gridmix} job 
- * honors the high ram or not. With emulation disabled it should 
- * not honor the high ram and should run as a normal job.
- */
-public class TestDisableGridmixEmulationOfHighRam 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(TestDisableGridmixEmulationOfHighRam.class);
-
-  /**
-   * Generate input data and run {@link Gridmix} with a high ram jobs trace 
-   * as a load job with SERIAL submission policy in SubmitterUserResolver 
-   * mode. Verify whether each {@link Gridmix} job honors the 
-   * high ram or not after completion of execution. With emulation 
-   * disabled the jobs should not honor the high ram. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHighRamForReducersOfMRJobs() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 250;
-    String tracePath = getTraceFile("highram_mr_jobs_case3");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "SERIAL",
-                               inputSizeInMB + "m",
-                               tracePath};
-
-    String [] otherArgs = {
-               "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-               "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", 
-               "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
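For context on the high ram checks in these tests: a trace job counts as "high ram" when its per-task memory request exceeds the cluster's configured slot memory. A minimal sketch of that rule, assuming the classic MRv1 configuration keys (an assumption, not code from this harness):

    // Hypothetical helper: true when the job asks for more map memory
    // than one map slot provides (MRv1 keys assumed).
    static boolean isHighRamMapJob(org.apache.hadoop.mapred.JobConf job) {
      long requested = job.getLong("mapred.job.map.memory.mb", -1);
      long slotSize  = job.getLong("mapred.cluster.map.memory.mb", -1);
      return requested > 0 && slotSize > 0 && requested > slotSize;
    }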

+ 0 - 95
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSAndLocalFSDCFiles.java

@@ -1,95 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the emulation of HDFS and Local FS distributed cache files against
- * the given input trace file.
- */
-public class TestEmulationOfHDFSAndLocalFSDCFiles extends 
-    GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestEmulationOfHDFSAndLocalFSDCFiles.class);
-
-  /**
-   * Generate the input data and distributed cache files for HDFS and 
-   * local FS. Verify the gridmix emulation of HDFS and Local FS 
-   * distributed cache files in RoundRobinUserResolver mode with STRESS
-   * submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateDataEmulateHDFSAndLocalFSDCFiles() 
-     throws Exception  {
-    final long inputSizeInMB = 1024 * 6;
-    final String tracePath = getTraceFile("distcache_case8_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-       "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-       "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
-       "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-       "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Use existing input and distributed cache files for HDFS and
-   * local FS. Verify the gridmix emulation of HDFS and Local FS
-   * distributed cache files in SubmitterUserResolver mode with REPLAY
-   * submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHDFSAndLocalFSDCFiles() 
-     throws Exception  {
-    final String tracePath = getTraceFile("distcache_case8_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues ={"LOADJOB",
-                                    SubmitterUserResolver.class.getName(),
-                                    "STRESS",
-                                    tracePath};
-
-    final String [] otherArgs = { 
-       "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-       "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
-       "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-       "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}

+ 0 - 91
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFileUsesMultipleJobs.java

@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the Gridmix emulation of an HDFS distributed cache file which is 
- * used by different jobs that are submitted by different users.
- */
-public class TestEmulationOfHDFSDCFileUsesMultipleJobs extends 
-    GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestEmulationOfHDFSDCFileUsesMultipleJobs.class);
-
-  /**
-   * Generate the input data and HDFS distributed cache file based 
-   * on given input trace. Verify the Gridmix emulation of HDFS
-   * distributed cache file in RoundRobinUserResolver mode with 
-   * STRESS submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulationOfHDFSDCFile() 
-     throws Exception { 
-    final long inputSizeInMB = 1024 * 6;
-    final String tracePath = getTraceFile("distcache_case9_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-        "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Verify the Gridmix emulation of HDFS distributed cache
-   * file in SubmitterUserResolver mode with STRESS submission policy 
-   * by using the existing input data and HDFS distributed cache file. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixEmulationOfHDFSPublicDCFile() 
-      throws Exception {
-    final String tracePath = getTraceFile("distcache_case9_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     tracePath};
-
-    final String [] otherArgs = {
-      "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-      "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}

+ 0 - 92
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHDFSDCFilesWithDifferentVisibilities.java

@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- *  Verify the Gridmix emulation of HDFS distributed cache files of 
- *  different visibilities. 
- */
-
-public class TestEmulationOfHDFSDCFilesWithDifferentVisibilities 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-     LogFactory.getLog(
-         TestEmulationOfHDFSDCFilesWithDifferentVisibilities.class);
-  
-  /**
-   * Generate input data and HDFS distributed cache files of different
-   * visibilities based on given input trace. Verify the Gridmix emulation 
-   * of HDFS distributed cache files of different visibilities in 
-   * RoundRobinUserResolver mode with STRESS submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulateOfHDFSDCFilesWithDiffVisibilities() 
-     throws Exception {
-    final long INPUT_SIZE = 1024 * 9;
-    final String tracePath = getTraceFile("distcache_case5_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = 
-                     { "LOADJOB",
-                       RoundRobinUserResolver.class.getName(),
-                       "STRESS",
-                       INPUT_SIZE+"m",
-                       "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                       tracePath};
-
-    final String [] otherArgs = { 
-        "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE +  "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Disable the distributed cache emulation and verify whether the 
-   * Gridmix jobs emulate the distributed cache files or not. 
-   * @throws Exception
-   */
-  @Test
-  public void testHDFSDCFilesWithoutEnableDCEmulation() 
-     throws Exception {
-    final String tracePath = getTraceFile("distcache_case6_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues ={ "LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "REPLAY",
-                                     tracePath};
-    final String [] otherArgs = {
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
-
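The "different visibilities" exercised above follow the usual distributed cache rule: a cache file is public only when the file itself is world-readable and every ancestor directory is world-executable; otherwise it is localized privately per user. A rough sketch of that rule (my summary, not the harness code):

    // Walks the path upwards, mirroring the public/private decision.
    static boolean isPubliclyVisible(org.apache.hadoop.fs.FileSystem fs,
        org.apache.hadoop.fs.Path file) throws java.io.IOException {
      if (!fs.getFileStatus(file).getPermission().getOtherAction()
             .implies(org.apache.hadoop.fs.permission.FsAction.READ)) {
        return false;
      }
      for (org.apache.hadoop.fs.Path p = file.getParent(); p != null;
           p = p.getParent()) {
        if (!fs.getFileStatus(p).getPermission().getOtherAction()
               .implies(org.apache.hadoop.fs.permission.FsAction.EXECUTE)) {
          return false;
        }
      }
      return true;
    }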

+ 0 - 64
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfHighRamAndNormalMRJobs.java

@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run {@link Gridmix} with a trace that combines high ram and normal jobs and
- * verify whether the high ram {@link Gridmix} jobs honor the emulation or not.
- * Normal MR jobs should not honor the high ram emulation.
- */
-public class TestEmulationOfHighRamAndNormalMRJobs
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(TestEmulationOfHighRamAndNormalMRJobs.class);
-
-  /**
-   * Generate input data and run the combination of normal and high ram 
-   * {@link Gridmix} jobs as load jobs with SERIAL submission policy 
-   * in SubmitterUserResolver mode. Verify whether each {@link Gridmix} 
-   * job honors the high ram or not after completion of execution. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHighRamForReducersOfMRJobs() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 250;
-    String tracePath = getTraceFile("highram_mr_jobs_case4");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeArgs = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "SERIAL",
-                               inputSizeInMB + "m",
-                               tracePath};
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", 
-            "-D", GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE + "=true"};
-
-    validateTaskMemoryParamters(tracePath, true);
-    runGridmixAndVerify(runtimeArgs, otherArgs, tracePath);
-  }
-}

+ 0 - 93
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestEmulationOfLocalFSDCFiles.java

@@ -1,93 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the emulation of local FS distributed cache files.
- *
- */
-public class TestEmulationOfLocalFSDCFiles extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(TestEmulationOfLocalFSDCFiles.class);
-
-  /**
-   * Generate the input data and distributed cache files. Verify the 
-   * gridmix emulation of local file system distributed cache files 
-   * in RoundRobinUserResolver mode with REPLAY submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateInputAndEmulateLocalFSDCFile() 
-     throws Exception { 
-    final long inputSizeInMB = 1024 * 6;
-    final String tracePath = getTraceFile("distcache_case7_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "REPLAY",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-       "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-       "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
-       "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-       "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Use existing input and local distributed cache files and verify 
-   * the gridmix emulation of local file system distributed cache 
-   * files in SubmitterUserResolver mode with STRESS
-   * submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfLocalFSDCFile() 
-     throws Exception  {
-    final String tracePath = getTraceFile("distcache_case7_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     tracePath};
-
-    final String [] otherArgs = {
-       "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-      "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true",
-      "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-      "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}

+ 0 - 229
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixDataGeneration.java

@@ -1,229 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapreduce.test.system.JTClient;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.gridmix.RoundRobinUserResolver;
-import org.apache.hadoop.mapred.gridmix.EchoUserResolver;
-import org.apache.hadoop.mapred.gridmix.SubmitterUserResolver;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.ContentSummary;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-import org.junit.Assert;
-import java.io.IOException;
-
-/**
- * Verify the Gridmix data generation with various submission policies and 
- * user resolver modes.
- */
-public class TestGridMixDataGeneration {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridMixDataGeneration.class);
-  private static Configuration conf = new Configuration();
-  private static MRCluster cluster;
-  private static JTClient jtClient;
-  private static JTProtocol rtClient;
-  private static Path gridmixDir;
-  private static int cSize;
-
-  @BeforeClass
-  public static void before() throws Exception {
-    String [] excludeExpList = {"java.net.ConnectException", 
-                                "java.io.IOException"};
-    cluster = MRCluster.createCluster(conf);
-    cluster.setExcludeExpList(excludeExpList);
-    cluster.setUp();
-    cSize = cluster.getTTClients().size();
-    jtClient = cluster.getJTClient();
-    rtClient = jtClient.getProxy();
-    gridmixDir = new Path("herriot-gridmix");
-    UtilsForGridmix.createDirs(gridmixDir, rtClient.getDaemonConf());
-  }
-
-  @AfterClass
-  public static void after() throws Exception {
-    UtilsForGridmix.cleanup(gridmixDir,conf);
-    cluster.tearDown();
-  }
-  
-  /**
-   * Generate the data in a STRESS submission policy with SubmitterUserResolver 
-   * mode and verify whether the generated data matches the given 
-   * input size or not.
-   * @throws IOException
-   */
-  @Test
-  public void testGenerateDataWithSTRESSSubmission() throws Exception {
-    conf = rtClient.getDaemonConf();
-    final long inputSizeInMB = cSize * 128;
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "STRESS",
-                               inputSizeInMB + "m",
-                               "file:///dev/null"};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    int exitCode = 
-        UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-            GridMixRunMode.DATA_GENERATION.getValue(), 
-            runtimeValues, otherArgs);
-    Assert.assertEquals("Data generation has failed.", 0 , exitCode);
-    checkGeneratedDataAndJobStatus(inputSizeInMB);
-  }
-  
-  /**
-   * Generate the data in a REPLAY submission policy with RoundRobinUserResolver
-   * mode and verify whether the generated data matches the given 
-   * input size or not.
-   * @throws Exception
-   */
-  @Test
-  public void testGenerateDataWithREPLAYSubmission() throws Exception {
-    conf = rtClient.getDaemonConf();
-    final long inputSizeInMB = cSize * 300;
-    String [] runtimeValues = 
-               {"LOADJOB",
-                RoundRobinUserResolver.class.getName(),
-                "REPLAY",
-                inputSizeInMB +"m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                "file:///dev/null"};
-    
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    int exitCode = 
-        UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-            GridMixRunMode.DATA_GENERATION.getValue(), 
-            runtimeValues, otherArgs);
-    Assert.assertEquals("Data generation has failed.", 0 , exitCode);
-    checkGeneratedDataAndJobStatus(inputSizeInMB); 
-  }
-  
-  /**
-   * Generate the data in a SERIAL submission policy with EchoUserResolver
-   * mode and also set the number of bytes per file in the data. Verify whether 
-   * each file size matches the given per-file size or not and also 
-   * verify the overall size of the generated data.
-   * @throws Exception
-   */
-  @Test
-  public void testGenerateDataWithSERIALSubmission() throws Exception {
-    conf = rtClient.getDaemonConf();
-    long perNodeSizeInMB = 500; // 500 mb per node data
-    final long inputSizeInMB = cSize * perNodeSizeInMB;
-    String [] runtimeValues ={"LOADJOB", 
-                              EchoUserResolver.class.getName(), 
-                              "SERIAL", 
-                              inputSizeInMB + "m", 
-                              "file:///dev/null"};
-    long bytesPerFile = 200  * 1024 * 1024; // 200 mb per file of data
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile, 
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    int exitCode = 
-        UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-            GridMixRunMode.DATA_GENERATION.getValue(), 
-            runtimeValues, otherArgs);
-    Assert.assertEquals("Data generation has failed.", 0 , exitCode);
-    LOG.info("Verify the eache file size in a generate data.");
-    verifyEachNodeSize(new Path(gridmixDir, "input"), perNodeSizeInMB);
-    verifyNumOfFilesGeneratedInEachNode(new Path(gridmixDir, "input"), 
-                                        perNodeSizeInMB, bytesPerFile);
-    checkGeneratedDataAndJobStatus(inputSizeInMB);
-  }
-  
-  private void checkGeneratedDataAndJobStatus(long inputSize) 
-      throws IOException {
-    LOG.info("Verify the generated data size.");
-    long dataSizeInMB = getDataSizeInMB(new Path(gridmixDir,"input"));
-    Assert.assertTrue("Generate data has not matched with given size",
-       dataSizeInMB + 0.1 > inputSize || dataSizeInMB - 0.1 < inputSize);
- 
-    JobClient jobClient = jtClient.getClient();
-    int len = jobClient.getAllJobs().length;
-    LOG.info("Verify the job status after completion of job.");
-    Assert.assertEquals("Job has not succeeded.", JobStatus.SUCCEEDED, 
-                        jobClient.getAllJobs()[len-1].getRunState());
-  }
-  
-  private void verifyEachNodeSize(Path inputDir, long dataSizePerNode) 
-      throws IOException {
-    FileSystem fs = inputDir.getFileSystem(conf);
-    FileStatus [] fstatus = fs.listStatus(inputDir);
-    for (FileStatus fstat : fstatus) {
-      if ( fstat.isDirectory()) {
-        long fileSize = getDataSizeInMB(fstat.getPath());
-        Assert.assertTrue("The Size has not matched with given "
-                         + "per node file size(" + dataSizePerNode +"MB)", 
-                         fileSize + 0.1 > dataSizePerNode 
-                         || fileSize - 0.1 < dataSizePerNode);
-      }
-    }    
-  }
-
-  private void verifyNumOfFilesGeneratedInEachNode(Path inputDir, 
-      long nodeSize, long fileSize) throws IOException {
-    long fileSizeInMB = fileSize / (1024 * 1024); // nodeSize is in MB, fileSize in bytes
-    long expFileCount = nodeSize / fileSizeInMB;
-    expFileCount = expFileCount + ((nodeSize % fileSizeInMB != 0) ? 1 : 0);
-    FileSystem fs = inputDir.getFileSystem(conf);
-    FileStatus [] fstatus = fs.listStatus(inputDir);
-    for (FileStatus fstat : fstatus) {
-      if ( fstat.isDirectory()) {
-        FileSystem nodeFs = fstat.getPath().getFileSystem(conf);
-        long actFileCount = nodeFs.getContentSummary(
-            fstat.getPath()).getFileCount();
-        Assert.assertEquals("File count has not matched.", expFileCount, 
-                            actFileCount);
-      }
-    }
-  }
-
-  private static long getDataSizeInMB(Path inputDir) throws IOException {
-    FileSystem fs = inputDir.getFileSystem(conf);
-    ContentSummary csmry = fs.getContentSummary(inputDir);
-    long dataSize = csmry.getLength();
-    dataSize = dataSize/(1024 * 1024);
-    return dataSize;
-  }
-}
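The per-node file count asserted in verifyNumOfFilesGeneratedInEachNode above is a ceiling division of the per-node data size by the per-file size; with 500 MB per node and 200 MB per file that is 3 files (two full, one partial). Written out as a self-contained helper (illustrative only):

    // ceil(perNodeSizeInMB / perFileSizeInMB) without floating point.
    static long expectedFilesPerNode(long perNodeSizeInMB, long bytesPerFile) {
      long perFileMB = bytesPerFile / (1024 * 1024);
      return (perNodeSizeInMB + perFileMB - 1) / perFileMB;
    }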

+ 0 - 128
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridMixFilePool.java

@@ -1,128 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.test.system.MRCluster;
-import org.apache.hadoop.mapreduce.test.system.JTClient;
-import org.apache.hadoop.mapreduce.test.system.JTProtocol;
-import org.apache.hadoop.mapred.gridmix.FilePool;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileStatus;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.AfterClass;
-import org.junit.Test;
-import java.io.IOException;
-import java.util.ArrayList;
-
-public class TestGridMixFilePool {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridMixFilePool.class);
-  private static Configuration conf = new Configuration();
-  private static MRCluster cluster;
-  private static JTProtocol remoteClient;
-  private static JTClient jtClient;
-  private static Path gridmixDir;
-  private static int clusterSize; 
-  
-  @BeforeClass
-  public static void before() throws Exception {
-    String [] excludeExpList = {"java.net.ConnectException", 
-                                 "java.io.IOException"};
-    cluster = MRCluster.createCluster(conf);
-    cluster.setExcludeExpList(excludeExpList);
-    cluster.setUp();
-    jtClient = cluster.getJTClient();
-    remoteClient = jtClient.getProxy();
-    clusterSize = cluster.getTTClients().size();
-    gridmixDir = new Path("herriot-gridmix");
-    UtilsForGridmix.createDirs(gridmixDir, remoteClient.getDaemonConf());
-  }
-
-  @AfterClass
-  public static void after() throws Exception {
-    UtilsForGridmix.cleanup(gridmixDir, conf);
-    cluster.tearDown();
-  }
-  
-  @Test
-  public void testFilesCountAndSizesForSpecifiedFilePool() throws Exception {
-    conf = remoteClient.getDaemonConf();
-    final long inputSizeInMB = clusterSize * 200;
-    int [] fileSizesInMB = {50, 100, 400, 50, 300, 10, 60, 40, 20, 10, 500};
-    long targetSize = Long.MAX_VALUE;
-    final int expFileCount = clusterSize + 4;
-    String [] runtimeValues = {"LOADJOB",
-                              SubmitterUserResolver.class.getName(),
-                              "STRESS",
-                              inputSizeInMB + "m",
-                              "file:///dev/null"}; 
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    // Generate the input data by using gridmix framework.
-    int exitCode = 
-        UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-            GridMixRunMode.DATA_GENERATION.getValue(), 
-            runtimeValues, otherArgs);
-    Assert.assertEquals("Data generation has failed.", 0 , exitCode);
-    // Create the files without using gridmix input generation with 
-    // above mentioned sizes in a array.
-    createFiles(new Path(gridmixDir, "input"), fileSizesInMB);
-    conf.setLong(FilePool.GRIDMIX_MIN_FILE, 100 * 1024 * 1024);
-    FilePool fpool = new FilePool(conf, new Path(gridmixDir, "input"));
-    fpool.refresh();
-    verifyFilesSizeAndCountForSpecifiedPool(expFileCount, targetSize, fpool);
-  }
-  
-  private void createFiles(Path inputDir, int [] fileSizes) 
-      throws Exception { 
-    for (int size : fileSizes) {
-      UtilsForGridmix.createFile(size, inputDir, conf);
-    }
-  }
-  
-  private void verifyFilesSizeAndCountForSpecifiedPool(int expFileCount, 
-      long targetSize, FilePool pool) throws IOException {
-    final ArrayList<FileStatus> files = new ArrayList<FileStatus>();
-    long filesSizeInBytes = pool.getInputFiles(targetSize, files);
-    long actFilesSizeInMB = filesSizeInBytes / (1024 * 1024);
-    long expFilesSizeInMB = (clusterSize * 200) + 1300;
-    Assert.assertEquals("Files Size has not matched for specified pool.", 
-                        expFilesSizeInMB, actFilesSizeInMB);
-    int actFileCount = files.size();
-    Assert.assertEquals("File count has not matched.", expFileCount, 
-                        actFileCount);
-    int count = 0;
-    for (FileStatus fstat : files) {
-      String fp = fstat.getPath().toString();
-      // Entries that are not generated data files are the per-node folders.
-      count = count + (fp.contains("datafile_") ? 0 : 1);
-    }
-    Assert.assertEquals("Total folder count does not match the cluster size.", 
-                        clusterSize, count);
-  }
-}
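The pool assertions above rest on FilePool's contract as exercised by this test: after refresh(), getInputFiles(targetSize, files) fills the list with selectable inputs and returns their total size in bytes, honoring the FilePool.GRIDMIX_MIN_FILE threshold. A compact sketch of that usage, placed in the same package as the test to sidestep visibility questions; the paths are illustrative:

    package org.apache.hadoop.mapred.gridmix;

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.Path;

    public class FilePoolSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Files below this threshold are grouped rather than served singly.
        conf.setLong(FilePool.GRIDMIX_MIN_FILE, 100 * 1024 * 1024);
        FilePool pool = new FilePool(conf, new Path("herriot-gridmix/input"));
        pool.refresh(); // scan the input tree before querying
        List<FileStatus> files = new ArrayList<FileStatus>();
        long bytes = pool.getInputFiles(Long.MAX_VALUE, files); // no size cap
        System.out.println(files.size() + " entries, " + bytes + " bytes");
      }
    }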

+ 0 - 173
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressedInputGeneration.java

@@ -1,173 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.gridmix.Gridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the Gridmix-generated input when compression emulation is turned on.
- */
-public class TestGridmixCompressedInputGeneration 
-    extends GridmixSystemTestCase { 
-
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixCompressedInputGeneration.class");
-
-  /**
-   * Generate input data and verify whether input files are compressed
-   * or not.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixCompressionInputGeneration() throws Exception {
-    final long inputSizeInMB = 1024 * 7;
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     inputSizeInMB  + "m",
-                                     "file:///dev/null"};
-    final String [] otherArgs = { 
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
-    };
-    LOG.info("Verify the generated compressed input data.");
-    runAndVerify(true, inputSizeInMB, runtimeValues, otherArgs);
-  }
-
-  /**
-   * Disable compression emulation and verify whether input files are 
-   * compressed or not.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixInputGenerationWithoutCompressionEnable() 
-      throws Exception { 
-    UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
-    final long inputSizeInMB = 1024 * 6;
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     inputSizeInMB + "m",
-                                     "file:///dev/null"};
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    LOG.info("Verify the generated uncompressed input data.");
-    runAndVerify(false, inputSizeInMB, runtimeValues, otherArgs);
-  }
-  
-  private void runAndVerify(boolean isCompressed, long inputSize, 
-      String [] runtimeValues, String [] otherArgs) throws Exception { 
-    int exitCode = 
-        UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-                                      GridMixRunMode.DATA_GENERATION.getValue(),
-                                      runtimeValues, otherArgs);
-    Assert.assertEquals("Data generation has failed.", 0, exitCode);
-    verifyJobStatus();
-    verifyInputDataSize(inputSize);
-    verifyInputFiles(isCompressed);
-  }
-  
-  private void verifyInputFiles(boolean isCompressed) throws IOException { 
-    List<String> inputFiles = 
-        getInputFiles(conf, Gridmix.getGridmixInputDataPath(gridmixDir));
-    for (String inputFile: inputFiles) {
-      boolean isCompressedFile = inputFile.contains(".gz") 
-                              || inputFile.contains(".tgz");
-      if (isCompressed) { 
-        Assert.assertTrue("Compressed input file was not found.",
-                          isCompressedFile);
-      } else {
-        Assert.assertFalse("Found a compressed input file although "
-                           + "compression is disabled.", isCompressedFile);
-      }
-    }
-  }
-
-  private void verifyInputDataSize(long inputSize) throws IOException {
-    long actDataSize = 
-        getInputDataSizeInMB(conf, Gridmix.getGridmixInputDataPath(gridmixDir));
-    // Compare directly against the requested size; the previous check
-    // derived the expected size from the actual size and could not fail.
-    Assert.assertTrue("Generated data size does not match the given size.",
-                      actDataSize + 0.1 > inputSize 
-                      && actDataSize - 0.1 < inputSize);
-  }
-
-  private void verifyJobStatus() throws IOException { 
-    JobClient jobClient = jtClient.getClient();
-    int len = jobClient.getAllJobs().length;
-    LOG.info("Verify the job status after completion of job...");
-    Assert.assertEquals("Job has not succeeded.", JobStatus.SUCCEEDED, 
-                        jobClient.getAllJobs()[len -1].getRunState());
-  }
-
-  private long getInputDataSizeInMB(Configuration conf, Path inputDir) 
-      throws IOException { 
-    FileSystem fs = inputDir.getFileSystem(conf);
-    ContentSummary csmry = fs.getContentSummary(inputDir);
-    long dataSize = csmry.getLength();
-    dataSize = dataSize/(1024 * 1024);
-    return dataSize;
-  }
-
-  private List<String> getInputFiles(Configuration conf, Path inputDir) 
-      throws IOException {
-    FileSystem fs = inputDir.getFileSystem(conf);
-    FileStatus [] listStatus = fs.listStatus(inputDir);
-    List<String> files = new ArrayList<String>();
-    for (FileStatus fileStat : listStatus) {
-      files.add(getInputFile(fileStat, conf));
-    }
-    return files;
-  }
-
-  private String getInputFile(FileStatus fstatus, Configuration conf) 
-      throws IOException {
-    String fileName = null;
-    if (!fstatus.isDirectory()) {
-      fileName = fstatus.getPath().getName();
-    } else {
-      FileSystem fs = fstatus.getPath().getFileSystem(conf);
-      FileStatus [] listStatus = fs.listStatus(fstatus.getPath());
-      for (FileStatus fileStat : listStatus) {
-         return getInputFile(fileStat, conf);
-      }
-    }
-    return fileName;
-  }
-}
-
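A note on the "-D" pairs these tests pass as otherArgs: Gridmix runs under ToolRunner, so GenericOptionsParser folds each -D key=value definition into the Configuration before the tool sees its remaining arguments. A small standalone sketch of that mechanism; the key string is the compression-emulation switch used by these tests, and exact key names should be treated as assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.GenericOptionsParser;

    public class GenericOptsSketch {
      public static void main(String[] args) throws Exception {
        String[] argv = {
            "-D", "gridmix.compression-emulation.enable=true",
            "file:///tmp/trace.json"
        };
        GenericOptionsParser parser =
            new GenericOptionsParser(new Configuration(), argv);
        // The -D definition is now visible in the parsed Configuration...
        Configuration conf = parser.getConfiguration();
        System.out.println(
            conf.getBoolean("gridmix.compression-emulation.enable", false));
        // ...and only the positional arguments remain for the tool itself.
        for (String arg : parser.getRemainingArgs()) {
          System.out.println(arg);
        }
      }
    }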

+ 0 - 102
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixCompressionEmulationWithCompressInput.java

@@ -1,102 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the compression ratios of the gridmix jobs' map input, 
- * map output and reduce output against both the default and the 
- * user-specified compression ratios.
- *
- */
-public class TestGridmixCompressionEmulationWithCompressInput 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(
-          TestGridmixCompressionEmulationWithCompressInput.class);
-  final long inputSizeInMB = 1024 * 6;
-
-  /**
-   * Generate compressed input data and verify the map input, 
-   * map output and reduce output compression ratios of gridmix jobs 
-   * against the default compression ratios. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixCompressionRatiosAgainstDefaultCompressionRatio() 
-      throws Exception { 
-    final String tracePath = getTraceFile("compression_case1_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = { 
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath,
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Verify the map input, map output and reduce output compression ratios
-   * of gridmix jobs against user-specified compression ratios. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixOutputCompressionRatiosAgainstCustomRatios() 
-      throws Exception { 
-    final String tracePath = getTraceFile("compression_case1_trace");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    UtilsForGridmix.cleanup(gridmixDir, rtClient.getDaemonConf());
-
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=true",
-        "-D", GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE + "=true",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO + "=0.68",
-        "-D", GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO + "=0.35",
-        "-D", GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO + "=0.40"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
-

+ 0 - 89
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPrivateDCFile.java

@@ -1,89 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the Gridmix emulation of HDFS private distributed cache file.
- */
-public class TestGridmixEmulationOfHDFSPrivateDCFile 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixEmulationOfHDFSPrivateDCFile.class");
-  /**
-   * Generate input data and a single HDFS private distributed cache 
-   * file based on the given input trace. Verify the Gridmix emulation of 
-   * a single private HDFS distributed cache file in RoundRobinUserResolver 
-   * mode with STRESS submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulateOfHDFSPrivateDCFile() 
-      throws Exception {
-    final long inputSizeInMB = 8192;
-    final String tracePath = getTraceFile("distcache_case3_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = {
-        "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  /**
-   * Verify the Gridmix emulation of single HDFS private distributed 
-   * cache file in SubmitterUserResolver mode with REPLAY submission 
-   * policy by using the existing input data and HDFS private 
-   * distributed cache file.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixEmulationOfHDFSPrivateDCFile() 
-      throws Exception {
-    final String tracePath = getTraceFile("distcache_case3_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                    SubmitterUserResolver.class.getName(),
-                                    "REPLAY",
-                                    tracePath};
-    final String [] otherArgs = {
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
-
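For context on public versus private distributed cache files in these emulation tests: visibility is decided by the permissions of the HDFS path (world-readable paths are shared across users, others stay private to the submitting user), not by a different API call. A minimal sketch of registering a cache file under that assumption; the path and link name are illustrative:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.filecache.DistributedCache;

    public class DistCacheSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Register a cache file; the "#cachefile" suffix names the local
        // symlink. Whether it is localized as public or private depends on
        // the permissions of /user/test and of the file itself.
        DistributedCache.addCacheFile(
            new URI("hdfs:///user/test/cachefile#cachefile"), conf);
      }
    }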

+ 0 - 91
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHDFSPublicDCFile.java

@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the Gridmix emulation of HDFS public distributed cache file.
- */
-public class TestGridmixEmulationOfHDFSPublicDCFile 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixEmulationOfHDFSPublicDCFile.class");
-
-  /**
-   * Generate the input data and HDFS distributed cache file based 
-   * on given input trace. Verify the Gridmix emulation of single HDFS
-   * public distributed cache file in SubmitterUserResolver mode with 
-   * STRESS submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulationOfSingleHDFSDCFile() 
-      throws Exception { 
-    final long inputSizeInMB = 7168;
-    final String tracePath = getTraceFile("distcache_case1_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     inputSizeInMB + "m",
-                                     tracePath};
-
-    final String [] otherArgs = { 
-      "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-      "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-      "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Verify the Gridmix emulation of Single HDFS public distributed cache
-   * file in RoundRobinUserResolver mode with REPLAY submission policy 
-   * by using the existing input data and HDFS public distributed cache file. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixEmulationOfSingleHDFSPublicDCFile() 
-      throws Exception {
-    final String tracePath = getTraceFile("distcache_case1_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = 
-                     { "LOADJOB",
-                       RoundRobinUserResolver.class.getName(),
-                       "REPLAY",
-                       "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                       tracePath};
-
-    final String [] otherArgs = {
-       "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-       "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
-

+ 0 - 64
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase1.java

@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.GridmixJob;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run the {@link Gridmix} with a high ram jobs trace and 
- * verify whether each {@link Gridmix} job honors the high ram requests.
- * In the trace the jobs should use the high ram for both maps and reduces.
- */
-public class TestGridmixEmulationOfHighRamJobsCase1 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixEmulationOfHighRamJobsCase1.class");
-
- /**
-   * Generate input data and run {@link Gridmix} with a high ram jobs trace 
-   * as a load job and STRESS submission policy in a SubmitterUserResolver 
-   * mode. Verify whether each {@link Gridmix} job honors the high ram
-   * after completion of execution. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHighRamForMapsAndReducesOfMRJobs() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 400;
-    String tracePath = getTraceFile("highram_mr_jobs_case1");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "STRESS",
-                               inputSizeInMB + "m",
-                               tracePath};
-
-    String [] otherArgs = {
-               "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-               "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", 
-               "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};
-
-    validateTaskMemoryParamters(tracePath, true);
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}

+ 0 - 67
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase2.java

@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.GridmixJob;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run the {@link Gridmix} with a high ram jobs trace and 
- * verify whether each {@link Gridmix} job honors the high ram requests.
- * In the trace the jobs should use the high ram only for maps.
- */
-public class TestGridmixEmulationOfHighRamJobsCase2 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixEmulationOfHighRamJobsCase2.class");
-
- /**
-   * Generate input data and run {@link Gridmix} with a high ram jobs trace 
-   * as a load job and REPLAY submission policy in a RoundRobinUserResolver 
-   * mode. Verify whether each {@link Gridmix} job honors the high ram
-   * after completion of execution. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHighRamForMapsOfMRJobs() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("highram_mr_jobs_case2");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = 
-               {"LOADJOB",
-                RoundRobinUserResolver.class.getName(),
-                "REPLAY",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                tracePath};
-
-    String [] otherArgs = {
-               "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-               "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", 
-               "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};
-
-    validateTaskMemoryParamters(tracePath, true);
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}

+ 0 - 64
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfHighRamJobsCase3.java

@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.GridmixJob;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run the {@link Gridmix} with a high ram jobs trace and 
- * verify whether each {@link Gridmix} job honors the high ram requests.
- * In the trace the jobs should use the high ram only for reducers.
- */
-public class TestGridmixEmulationOfHighRamJobsCase3 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixEmulationOfHighRamJobsCase3.class);
-
- /**
-   * Generate input data and run {@link Gridmix} with a high ram jobs trace 
-   * as a load job and SERIAL submission policy in a SubmitterUserResolver 
-   * mode. Verify whether each {@link Gridmix} job honors the 
-   * high ram after completion of execution. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testEmulationOfHighRamForReducersOfMRJobs() 
-      throws Exception { 
-    final long inputSizeInMB = cSize * 250;
-    String tracePath = getTraceFile("highram_mr_jobs_case3");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "SERIAL",
-                               inputSizeInMB + "m",
-                               tracePath};
-
-    String [] otherArgs = {
-               "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false", 
-               "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false", 
-               "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=true"};
-
-    validateTaskMemoryParamters(tracePath, true);
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
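For reference across the three high-ram cases above: the emulation switch controls whether Gridmix re-issues the per-task memory requests recorded in the trace. In MRv1 those requests are the JobConf memory settings; a tiny sketch of the knobs involved follows (the values are illustrative, and the key string behind GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE is not shown in this diff, so treat it as an assumption):

    import org.apache.hadoop.mapred.JobConf;

    public class HighRamSketch {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        // A "high ram" job requests more memory per task than the default
        // slot size; Gridmix replays these requests when high-ram
        // emulation is enabled.
        job.setMemoryForMapTask(3072L);    // MB per map task
        job.setMemoryForReduceTask(4096L); // MB per reduce task
        System.out.println(job.getMemoryForMapTask() + " / "
            + job.getMemoryForReduceTask() + " MB");
      }
    }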

+ 0 - 91
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.java

@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Verify the Gridmix emulation of Multiple HDFS private distributed 
- * cache files.
- */
-public class TestGridmixEmulationOfMultipleHDFSPrivateDCFiles 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(
-          TestGridmixEmulationOfMultipleHDFSPrivateDCFiles.class);
-
-  /**
-   * Generate input data and multiple HDFS private distributed cache 
-   * files based on the given input trace. Verify the Gridmix emulation of 
-   * multiple private HDFS distributed cache files in RoundRobinUserResolver 
-   * mode with SERIAL submission policy.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulationOfMultipleHDFSPrivateDCFiles() 
-      throws Exception {
-    final long inputSize = 6144;
-    final String tracePath = getTraceFile("distcache_case4_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "SERIAL",
-                      inputSize+"m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-    final String [] otherArgs = {
-        "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-
-  /**
-   * Verify the Gridmix emulation of multiple HDFS private distributed 
-   * cache files in SubmitterUserResolver mode with STRESS submission 
-   * policy by using the existing input data and HDFS private 
-   * distributed cache files.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixEmulationOfMultipleHDFSPrivateDCFiles() 
-      throws Exception {
-    final String tracePath = getTraceFile("distcache_case4_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "STRESS",
-                                     tracePath};
-    final String [] otherArgs = {
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}

+ 0 - 92
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixEmulationOfMultipleHDFSPublicDCFiles.java

@@ -1,92 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Assert;
-import org.junit.Test;
-import java.io.IOException;
-
-/**
- * Verify the Gridmix emulation of Multiple HDFS public distributed 
- * cache files.
- */
-public class TestGridmixEmulationOfMultipleHDFSPublicDCFiles 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(
-          TestGridmixEmulationOfMultipleHDFSPublicDCFiles.class);
-
-  /**
-   * Generate the compressed input data and dist cache files based 
-   * on input trace. Verify the Gridmix emulation of
-   * multiple HDFS public distributed cache file.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGenerateAndEmulationOfMultipleHDFSDCFiles() 
-      throws Exception  {
-    final long inputSizeInMB = 7168;
-    final String tracePath = getTraceFile("distcache_case2_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = 
-                     {"LOADJOB",
-                      RoundRobinUserResolver.class.getName(),
-                      "STRESS",
-                      inputSizeInMB + "m",
-                      "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                      tracePath};
-
-    final String [] otherArgs = { 
-       "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-       "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-       "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Verify the Gridmix emulation of multiple HDFS public distributed cache 
-   * files by using existing compressed input data and HDFS dist cache files. 
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixEmulationOfMultipleHDFSPublicDCFile() 
-      throws Exception {
-    final String tracePath = getTraceFile("distcache_case2_trace");
-    Assert.assertNotNull("Trace file was not found.", tracePath);
-    final String [] runtimeValues = {"LOADJOB",
-                                     SubmitterUserResolver.class.getName(),
-                                     "SERIAL",
-                                     tracePath};
-
-    final String [] otherArgs = {
-      "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-      "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=true"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs,  tracePath, 
-                        GridMixRunMode.RUN_GRIDMIX.getValue());
-  }
-}
-

+ 0 - 67
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith10minTrace.java

@@ -1,67 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-
-/**
- * Run the Gridmix with 10 minutes MR jobs trace and 
- * verify each job history against the corresponding job story 
- * in a given trace file.
- */
-public class TestGridmixWith10minTrace extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith10minTrace.class);
-
-  /**
-   * Generate data and run gridmix sleep jobs with SERIAL submission 
-   * policy in a RoundRobinUserResolver mode against the 10 minutes trace file.
-   * Verify each Gridmix job history with a corresponding job story 
-   * in a trace file after completion of all the jobs execution.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith10minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 250;
-    final long minFileSize = 200 * 1024 * 1024;
-    String [] runtimeValues =
-               {"SLEEPJOB",
-                RoundRobinUserResolver.class.getName(),
-                "SERIAL",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                map.get("10m")};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize,
-        "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=false",
-        "-D", GridMixConfig.GRIDMIX_SLEEPJOB_MAPTASK_ONLY + "=true",
-        "-D", GridMixConfig.GRIDMIX_SLEEP_MAP_MAX_TIME + "=10"
-    };
-    String tracePath = map.get("10m");
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
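A note on the sleep-job runs in this and the following test: with SLEEPJOB, Gridmix submits synthetic jobs whose tasks merely sleep for the durations recorded in the trace, and the properties above cap those durations. A hedged sketch collecting the knobs this test sets; the key strings follow the Gridmix documentation and may differ by version:

    import org.apache.hadoop.conf.Configuration;

    public class SleepJobKnobsSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Emit map-only sleep jobs; reduces from the trace are dropped.
        conf.setBoolean("gridmix.sleep.maptask-only", true);
        // Cap each map's sleep duration (the test above passes 10).
        conf.setLong("gridmix.sleep.max-map-time", 10);
        // Submit to the default queue instead of the queue in the trace.
        conf.setBoolean("gridmix.job-submission.use-queue-in-trace", false);
        System.out.println(conf.get("gridmix.sleep.max-map-time"));
      }
    }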

+ 0 - 62
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith12minTrace.java

@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-
-/**
- * Run the Gridmix with 12 minutes MR job traces and 
- * verify each job history against the corresponding job story 
- * in a given trace file.
- */
-public class TestGridmixWith12minTrace extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith12minTrace.class);
- 
-  /**
-   * Generate data and run gridmix sleep jobs with REPLAY submission 
-   * policy in a SubmitterUserResolver mode against 12 minutes trace file.
-   * Verify each Gridmix job history with a corresponding job story 
-   * in a trace file after completion of all the jobs execution.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith12minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 150;
-    String [] runtimeValues = {"SLEEPJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "REPLAY",
-                               inputSizeInMB + "m",
-                               map.get("12m")};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_SLEEP_MAP_MAX_TIME + "=10",
-        "-D", GridMixConfig.GRIDMIX_SLEEP_REDUCE_MAX_TIME + "=5"
-    };
-
-    String tracePath = map.get("12m");
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}

+ 0 - 59
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith1minTrace.java

@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.junit.Test;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-
-/**
- * Run the Gridmix with 1 minute MR jobs trace and 
- * verify each job history against the corresponding job story 
- * in a given trace file.
- */
-public class TestGridmixWith1minTrace extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith1minTrace.class);
-
-  /**
-   * Generate data and run gridmix by load job with STRESS submission policy
-   * in a SubmitterUserResolver mode against 1 minute trace file. 
-   * Verify each Gridmix job history with a corresponding job story in the 
-   * trace after completion of all the jobs execution.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith1minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 400;
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "STRESS",
-                               inputSizeInMB + "m",
-                               map.get("1m")};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    String tracePath = map.get("1m");
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}

+ 0 - 64
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith2minStreamingJobTrace.java

@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run the Gridmix with a 2 minutes job trace generated from 
- * streaming job histories and verify each job history against 
- * the corresponding job story in a given trace file.
- */
-public class TestGridmixWith2minStreamingJobTrace 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog("TestGridmixWith2minStreamingJobTrace.class");
-
-  /**
-   * Generate input data and run Gridmix by load job with STRESS submission 
-   * policy in a SubmitterUserResolver mode against 2 minutes job 
-   * trace file of streaming jobs. Verify each Gridmix job history with 
-   * a corresponding job story in a trace file after completion of all 
-   * the jobs execution.  
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith2minStreamJobTrace() throws Exception {
-    final long inputSizeInMB = cSize * 250;
-    final long minFileSize = 150 * 1024 * 1024;
-    String tracePath = getTraceFile("2m_stream");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = {"LOADJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "STRESS",
-                               inputSizeInMB + "m",
-                               tracePath};
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true",
-        "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize,
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}

+ 0 - 68
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minStreamingJobTrace.java

@@ -1,68 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.junit.Assert;
-import org.junit.Test;
-
-/**
- * Run the Gridmix with a 3 minutes job trace generated from 
- * streaming job histories and verify each job history against 
- * the corresponding job story in a given trace file.
- */
-public class TestGridmixWith3minStreamingJobTrace 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-     LogFactory.getLog("TestGridmixWith3minStreamingJobTrace.class");
-
-  /**
-   * Generate input data and run gridmix by load job with REPLAY submission 
-   * policy in a RoundRobinUserResolver mode against 3 minutes job trace file 
-   * of streaming job. Verify each gridmix job history with a corresponding 
-   * job story in a trace file after completion of all the jobs execution.
-   * @throws Exception - if an error occurs.
-   */
-  @Test
-  public void testGridmixWith3minStreamJobTrace() throws Exception {
-    final long inputSizeInMB = cSize * 200;
-    final long bytesPerFile = 150 * 1024 * 1024;
-    String tracePath = getTraceFile("3m_stream");
-    Assert.assertNotNull("Trace file has not found.", tracePath);
-    String [] runtimeValues = 
-               {"LOADJOB",
-                RoundRobinUserResolver.class.getName(),
-                "REPLAY",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                tracePath};
-
-    String [] otherArgs = { 
-        "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true",
-        "-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile,
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}

+ 0 - 62
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith3minTrace.java

@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-
-/**
- * Run Gridmix with a 3-minute MR job trace and 
- * verify each job history against the corresponding job story 
- * in the given trace file.
- */
-public class TestGridmixWith3minTrace extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith3minTrace.class);
-
-  /**
-   * Generate data and run Gridmix with a load job, REPLAY submission 
-   * policy and the RoundRobinUserResolver mode using the 3-minute trace file. 
-   * Verify each Gridmix job history against the corresponding job story in 
-   * the trace after all of the jobs have completed.
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testGridmixWith3minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 200;
-    String tracePath = map.get("3m");
-    String [] runtimeValues = 
-              {"LOADJOB",
-               RoundRobinUserResolver.class.getName(),
-               "REPLAY",
-               inputSizeInMB + "m",
-               "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-               tracePath};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}

+ 0 - 65
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minStreamingJobTrace.java

@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Run Gridmix with a 5-minute job trace generated from 
- * streaming job histories and verify each job history against the 
- * corresponding job story in the given trace file.
- */
-public class TestGridmixWith5minStreamingJobTrace 
-    extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith5minStreamingJobTrace.class);
-
-  /**
-   * Generate input data and run Gridmix with a load job, SERIAL submission 
-   * policy and the SubmitterUserResolver mode against a 5-minute streaming 
-   * job trace file. Verify each Gridmix job history against the corresponding 
-   * job story in the trace file after all of the jobs have completed.
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testGridmixWith5minStreamJobTrace() throws Exception {
-    String tracePath = getTraceFile("5m_stream");
-    Assert.assertNotNull("Trace file not found.", tracePath);
-    final long inputSizeInMB = cSize * 200;
-    final long bytesPerFile = 150 * 1024 * 1024;
-    String [] runtimeValues = {"LOADJOB", 
-                              SubmitterUserResolver.class.getName(), 
-                              "SERIAL", 
-                              inputSizeInMB + "m",
-                              tracePath};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_KEY_FRC + "=0.5f",
-        "-D", GridMixConfig.GRIDMIX_BYTES_PER_FILE + "=" + bytesPerFile,
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false"
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}

+ 0 - 62
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith5minTrace.java

@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-
-/**
- * Run Gridmix with a 5-minute MR job trace and 
- * verify each job history against the corresponding job story 
- * in the given trace file.
- */
-public class TestGridmixWith5minTrace extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith5minTrace.class);
-
-  /**
-   * Generate data and run Gridmix with a load job, SERIAL submission 
-   * policy and the SubmitterUserResolver mode against the 5-minute trace file. 
-   * Verify each Gridmix job history against the corresponding job story 
-   * in the trace file after all of the jobs have completed.
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testGridmixWith5minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    final long minFileSize = 100 * 1024 * 1024;
-    String tracePath = map.get("5m");
-    String [] runtimeValues = {"LOADJOB", 
-                               SubmitterUserResolver.class.getName(), 
-                               "SERIAL", 
-                               inputSizeInMB + "m", 
-                               tracePath};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize
-    };
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}
-

+ 0 - 62
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestGridmixWith7minTrace.java

@@ -1,62 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.junit.Test;
-
-/**
- * Run Gridmix with a 7-minute MR job trace and 
- * verify each job history against the corresponding job story 
- * in the given trace file.
- */
-public class TestGridmixWith7minTrace extends GridmixSystemTestCase {
-  private static final Log LOG = 
-      LogFactory.getLog(TestGridmixWith7minTrace.class);
-
-  /**
-   * Generate data and run Gridmix with a sleep job, STRESS submission 
-   * policy and the SubmitterUserResolver mode against the 7-minute trace file.
-   * Verify each Gridmix job history against the corresponding job story 
-   * in the trace file after all of the jobs have completed.
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testGridmixWith7minTrace() throws Exception {
-    final long inputSizeInMB = cSize * 400;
-    final long minFileSize = 200 * 1024 * 1024;
-    String tracePath = map.get("7m");
-    String [] runtimeValues = {"SLEEPJOB",
-                               SubmitterUserResolver.class.getName(),
-                               "STRESS",
-                               inputSizeInMB + "m",
-                               tracePath};
-
-    String [] otherArgs = {
-        "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-        "-D", GridmixJob.GRIDMIX_HIGHRAM_EMULATION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-        "-D", GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE + "=" + minFileSize,
-        "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=false"
-    };
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath);
-  }
-}

+ 0 - 106
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithCustomIntrvl.java

@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} memory emulation feature for jobs with a 
- * custom progress interval, different input data, submission policies
- * and user resolver modes. Verify the total heap usage of the map and reduce
- * tasks of each job against the corresponding original job in the trace. 
- */
-public class TestMemEmulForMapsAndReducesWithCustomIntrvl 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(TestMemEmulForMapsAndReducesWithCustomIntrvl.class);
-  /**
-   * Generate compressed input and run {@link Gridmix} with memory emulation
-   * turned on and a custom progress interval. {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
-   * Verify the total heap memory usage of the maps and reduces of the 
-   * {@link Gridmix} jobs against the corresponding original job in the trace. 
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForReducesWithCompressedInputCase7() 
-      throws Exception { 
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              RoundRobinUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-              tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + 
-                GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.3F",
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} with memory emulation
-   * turned on and a custom progress interval. {@link Gridmix}
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : REPLAY, User Resolver Mode : SubmitterUserResolver
-   * Verify the total heap memory usage of the maps and reduces of the 
-   * {@link Gridmix} jobs against the corresponding original job in the trace. 
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForReducesWithUncompressedInputCase8()
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                SubmitterUserResolver.class.getName(), 
-                "REPLAY", 
-                inputSizeInMB + "m",
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.2F",
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-            GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}

+ 0 - 106
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsAndReducesWithDefaultIntrvl.java

@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} memory emulation feature for Gridmix jobs
- * with the default progress interval, different input data, submission 
- * policies and user resolver modes. Verify the total heap usage of the
- * map and reduce tasks of each job against the corresponding original
- * job in the trace. 
- */
-public class TestMemEmulForMapsAndReducesWithDefaultIntrvl 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(TestMemEmulForMapsAndReducesWithDefaultIntrvl.class);
-
-  /**
-   * Generate compressed input and run {@link Gridmix} with memory emulation
-   * turned on and the default progress interval. {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
-   * Verify the total heap memory usage of the maps and reduces of the 
-   * {@link Gridmix} jobs against the corresponding original job in the trace. 
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForReducesWithCompressedInputCase5() 
-      throws Exception { 
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              RoundRobinUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-              tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + 
-                GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-
-  /**
-   * Generate uncompressed input and run {@link Gridmix} with memory emulation
-   * turned on and the default progress interval. {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs. 
-   * Submission Policy : REPLAY, User Resolver Mode : SubmitterUserResolver
-   * Verify the total heap memory usage of the maps and reduces of the 
-   * {@link Gridmix} jobs against the corresponding original job in the trace. 
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForReducesWithUncompressedInputCase6()
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                SubmitterUserResolver.class.getName(), 
-                "REPLAY", 
-                inputSizeInMB + "m",
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" + 
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-            GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}

+ 0 - 108
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomHeapMemoryRatio.java

@@ -1,108 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs 
- * with the default progress interval, a custom heap memory ratio, different 
- * input data, submission policies and user resolver modes. Verify the total 
- * heap usage of the map and reduce tasks of each job against the 
- * corresponding original job in the trace. 
- */
-public class TestMemEmulForMapsWithCustomHeapMemoryRatio 
-    extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(TestMemEmulForMapsWithCustomHeapMemoryRatio.class);
-
-  /**
-   * Generate compressed input and run {@link Gridmix} with memory emulation
-   * turned on. {@link Gridmix} should use the following runtime 
-   * parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : SubmitterUserResolver
-   * Verify the total heap memory usage of the tasks of the {@link Gridmix} 
-   * jobs against the corresponding original job in the trace. 
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithCompressedInputCase1() 
-     throws Exception {
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file not found.", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              SubmitterUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "="  +
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO + "=0.5F"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} with memory emulation
-   * turned on. {@link Gridmix} should use the following runtime 
-   * parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
-   * Verify the total heap memory usage of the tasks of the {@link Gridmix} 
-   * jobs against the corresponding original job in the trace. 
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithUncompressedInputCase2() 
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("mem_emul_case2");
-    Assert.assertNotNull("Trace file not found.", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                RoundRobinUserResolver.class.getName(), 
-                "STRESS",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +  
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false",
-            "-D", GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO + "=0.4F"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-            GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}

+ 0 - 106
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithCustomIntrvl.java

@@ -1,106 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs 
- * with a custom progress interval, different input data, submission policies 
- * and user resolver modes. Verify the total heap usage of the map tasks of
- * each job against the corresponding original job in the trace. 
- */
-public class TestMemEmulForMapsWithCustomIntrvl extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-      LogFactory.getLog(TestMemEmulForMapsWithCustomIntrvl.class);
-
-  /**
-   * Generate compressed input and run {@link Gridmix} with memory emulation
-   * turned on and a custom progress interval. {@link Gridmix}
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : SubmitterUserResolver
-   * Verify the total heap memory usage of the maps of the {@link Gridmix} 
-   * jobs against the corresponding original job in the trace. 
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithCompressedInputCase3() 
-     throws Exception {
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("mem_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              SubmitterUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "="  +
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.2F",
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-
-  /**
-   * Generate uncompressed input and run {@link Gridmix} with memory emulation
-   * turned on and a custom progress interval. {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
-   * Verify the total heap memory usage of the maps of the {@link Gridmix} 
-   * jobs against the corresponding original job in the trace. 
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithUncompressedInputCase4() 
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("mem_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                RoundRobinUserResolver.class.getName(), 
-                "STRESS",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +  
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.3F",
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-            GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}

+ 0 - 104
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/TestMemEmulForMapsWithDefaultIntrvl.java

@@ -1,104 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;
-import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
-import org.apache.hadoop.mapred.gridmix.test.system.UtilsForGridmix;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.junit.Test;
-import org.junit.Assert;
-
-/**
- * Test the {@link Gridmix} memory emulation feature for {@link Gridmix} jobs 
- * with the default progress interval, different input data, submission 
- * policies and user resolver modes. Verify the total heap usage of the map 
- * tasks of each job against the corresponding original job in the trace. 
- */
-public class TestMemEmulForMapsWithDefaultIntrvl extends GridmixSystemTestCase { 
-  private static final Log LOG = 
-          LogFactory.getLog(TestMemEmulForMapsWithDefaultIntrvl.class);
-
-  /**
-   * Generate compressed input and run {@link Gridmix} with memory emulation
-   * turned on and the default progress interval. {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : SubmitterUserResolver
-   * Verify the total heap memory usage of the maps of the {@link Gridmix} 
-   * jobs against the corresponding original job in the trace. 
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithCompressedInputCase1() 
-     throws Exception {
-    final long inputSizeInMB = 1024 * 7;
-    String tracePath = getTraceFile("mem_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-            { "LOADJOB",
-              SubmitterUserResolver.class.getName(),
-              "STRESS",
-              inputSizeInMB + "m",
-              tracePath};
-
-    String [] otherArgs = { 
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "="  +
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-           GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-  
-  /**
-   * Generate uncompressed input and run {@link Gridmix} with memory emulation
-   * turned on and the default progress interval. {@link Gridmix} 
-   * should use the following runtime parameters while running the jobs.
-   * Submission Policy : STRESS, User Resolver Mode : RoundRobinUserResolver
-   * Verify the total heap memory usage of the maps of the {@link Gridmix} 
-   * jobs against the corresponding original job in the trace. 
-   * @throws Exception if an error occurs.
-   */
-  @Test
-  public void testMemoryEmulationForMapsWithUncompressedInputCase2() 
-      throws Exception {
-    final long inputSizeInMB = cSize * 300;
-    String tracePath = getTraceFile("mem_emul_case1");
-    Assert.assertNotNull("Trace file not found!", tracePath);
-    String [] runtimeValues = 
-              { "LOADJOB", 
-                RoundRobinUserResolver.class.getName(), 
-                "STRESS",
-                inputSizeInMB + "m",
-                "file://" + UtilsForGridmix.getProxyUsersFile(conf),
-                tracePath};
-
-    String [] otherArgs = {
-            "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "=" +  
-                  GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
-            "-D", GridMixConfig.GRIDMIX_DISTCACHE_ENABLE + "=false",
-            "-D", GridMixConfig.GRIDMIX_COMPRESSION_ENABLE + "=false",
-            "-D", MRJobConfig.JOB_CANCEL_DELEGATION_TOKEN + "=false"};
-
-    runGridmixAndVerify(runtimeValues, otherArgs, tracePath, 
-            GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
-  }
-}
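
The four memory-emulation suites above all enable the heap plugin through the same property pair. Distilled into a minimal sketch using the GridMixConfig constants defined in the next removed file (the custom-interval line is the only optional extra; the wrapper class name here is illustrative):

import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;

public class MemEmulArgsSketch {
  // Property wiring shared by the memory-emulation tests above; the
  // GridMixConfig constants are defined in the next removed file.
  static String[] memEmulArgs() {
    return new String[] {
        // register the heap-usage plugin with the resource-usage matcher
        "-D", GridMixConfig.GRIDMIX_MEMORY_EMULATON + "="
            + GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN,
        // optional: override the default emulation progress interval
        "-D", GridMixConfig.GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL + "=0.2F"
    };
  }
}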

+ 0 - 285
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixConfig.java

@@ -1,285 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-
-import org.apache.hadoop.mapred.gridmix.Gridmix;
-import org.apache.hadoop.mapred.gridmix.JobCreator;
-import org.apache.hadoop.mapred.gridmix.SleepJob;
-import org.apache.hadoop.mapreduce.MRConfig;
-import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.mapred.gridmix.emulators.resourceusage.*;
-
-/**
- * Gridmix system tests configurations. 
- */
-public class GridMixConfig {
-
-  /**
-   *  Gridmix original job id.
-   */
-  public static final String GRIDMIX_ORIGINAL_JOB_ID = Gridmix.ORIGINAL_JOB_ID;
-
-  /**
-   *  Gridmix output directory.
-   */
-  public static final String GRIDMIX_OUTPUT_DIR = Gridmix.GRIDMIX_OUT_DIR; 
-
-  /**
-   * Gridmix job type (LOADJOB/SLEEPJOB).
-   */
-  public static final String GRIDMIX_JOB_TYPE = JobCreator.GRIDMIX_JOB_TYPE;
-
-  /**
-   *  Gridmix submission use queue.
-   */
-  /* In the Gridmix package the visibility of the properties below 
-  is protected, so they are not visible outside the package. However, 
-  they are required by the system tests, so they are redefined here 
-  in the system test configuration. */
-  public static final String GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE = 
-      "gridmix.job-submission.use-queue-in-trace";
-  
-  /**
-   *  Gridmix user resolver(RoundRobinUserResolver/
-   *  SubmitterUserResolver/EchoUserResolver).
-   */
-  public static final String GRIDMIX_USER_RESOLVER = Gridmix.GRIDMIX_USR_RSV;
-
-  /**
-   *  Gridmix queue depth.
-   */
-  public static final String GRIDMIX_QUEUE_DEPTH = Gridmix.GRIDMIX_QUE_DEP;
-
-  /* In the Gridmix package the visibility of the property below 
-  is protected, so it is not available outside the package. However, 
-  it is required by the system tests, so it is redefined here 
-  in the system test configuration. */
-  /**
-   * Gridmix generate bytes per file.
-   */
-  public static final String GRIDMIX_BYTES_PER_FILE = 
-      "gridmix.gen.bytes.per.file";
-  
-  /**
-   *  Gridmix job submission policy(STRESS/REPLAY/SERIAL).
-   */
-
-  public static final String GRIDMIX_SUBMISSION_POLICY =
-      "gridmix.job-submission.policy";
-
-  /**
-   *  Gridmix minimum file size.
-   */
-  public static final String GRIDMIX_MINIMUM_FILE_SIZE =
-      "gridmix.min.file.size";
-
-  /**
-   * Gridmix key fraction.
-   */
-  public static final String GRIDMIX_KEY_FRC = 
-      "gridmix.key.fraction";
-
-  /**
-   * Gridmix compression enable
-   */
-  public static final String GRIDMIX_COMPRESSION_ENABLE =
-      "gridmix.compression-emulation.enable";
-  /**
-   * Gridmix distcache enable
-   */
-  public static final String GRIDMIX_DISTCACHE_ENABLE = 
-      "gridmix.distributed-cache-emulation.enable";
-
-  /**
-   * Gridmix input decompression enable.
-   */
-  public static final String GRIDMIX_INPUT_DECOMPRESS_ENABLE = 
-    "gridmix.compression-emulation.input-decompression.enable";
-
-  /**
-   * Gridmix input compression ratio.
-   */
-  public static final String GRIDMIX_INPUT_COMPRESS_RATIO = 
-    "gridmix.compression-emulation.map-input.decompression-ratio";
-
-  /**
-   * Gridmix intermediate compression ratio.
-   */
-  public static final String GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO = 
-    "gridmix.compression-emulation.map-output.compression-ratio";
-
-  /**
-   * Gridmix output compression ratio.
-   */
-  public static final String GRIDMIX_OUTPUT_COMPRESSION_RATIO = 
-      "gridmix.compression-emulation.reduce-output.compression-ratio";
-
-  /**
-   * Gridmix distributed cache visibilities.
-   */
-  public static final String GRIDMIX_DISTCACHE_VISIBILITIES = 
-      MRJobConfig.CACHE_FILE_VISIBILITIES;
-
-  /**
-   * Gridmix distributed cache files.
-   */
-  public static final String GRIDMIX_DISTCACHE_FILES = 
-      MRJobConfig.CACHE_FILES;
-  
-  /**
-   * Gridmix distributed cache files size.
-   */
-  public static final String GRIDMIX_DISTCACHE_FILESSIZE = 
-      MRJobConfig.CACHE_FILES_SIZES;
-
-  /**
-   * Gridmix distributed cache files time stamp.
-   */
-  public static final String GRIDMIX_DISTCACHE_TIMESTAMP =
-      MRJobConfig.CACHE_FILE_TIMESTAMPS;
-
-  /**
-   *  Gridmix logger mode.
-   */
-  public static final String GRIDMIX_LOG_MODE =
-      "log4j.logger.org.apache.hadoop.mapred.gridmix";
-
-  /**
-   * Gridmix sleep job map task only.
-   */
-  public static final String GRIDMIX_SLEEPJOB_MAPTASK_ONLY = 
-      SleepJob.SLEEPJOB_MAPTASK_ONLY;
-
-  /**
-   * Gridmix sleep map maximum time.
-   */
-  public static final String GRIDMIX_SLEEP_MAP_MAX_TIME = 
-      SleepJob.GRIDMIX_SLEEP_MAX_MAP_TIME;
-
-  /**
-   * Gridmix sleep reduce maximum time.
-   */
-  public static final String GRIDMIX_SLEEP_REDUCE_MAX_TIME = 
-      SleepJob.GRIDMIX_SLEEP_MAX_REDUCE_TIME;
-
-  /**
-   * Gridmix high ram job emulation enable.
-   */
-  public static final String GRIDMIX_HIGH_RAM_JOB_ENABLE = 
-      "gridmix.highram-emulation.enable";
-
-  /**
-   * Job map memory in mb.
-   */
-  public static final String JOB_MAP_MEMORY_MB = 
-      MRJobConfig.MAP_MEMORY_MB;
-
-  /**
-   * Job reduce memory in mb.
-   */
-  public static final String JOB_REDUCE_MEMORY_MB = 
-      MRJobConfig.REDUCE_MEMORY_MB;
-
-  /**
-   * Cluster map memory in mb. 
-   */
-  public static final String CLUSTER_MAP_MEMORY = 
-      MRConfig.MAPMEMORY_MB;
-
-  /**
-   * Cluster reduce memory in mb.
-   */
-  public static final String CLUSTER_REDUCE_MEMORY = 
-      MRConfig.REDUCEMEMORY_MB;
-
-  /**
-   * Cluster maximum map memory.
-   */
-  public static final String CLUSTER_MAX_MAP_MEMORY = 
-      JTConfig.JT_MAX_MAPMEMORY_MB;
-
-  /**
-   * Cluster maximum reduce memory.
-   */
-  public static final String CLUSTER_MAX_REDUCE_MEMORY = 
-      JTConfig.JT_MAX_REDUCEMEMORY_MB;
-
- /**
-  * Gridmix cpu emulation.
-  */
- public static final String GRIDMIX_CPU_EMULATON =
-     ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS;
-
- /**
-  *  Gridmix cpu usage emulation plugin.
-  */
- public  static final String GRIDMIX_CPU_USAGE_PLUGIN =
-     CumulativeCpuUsageEmulatorPlugin.class.getName();
-
- /**
-  * Gridmix cpu emulation custom interval.
-  */
- public static final String GRIDMIX_CPU_CUSTOM_INTERVAL =
-     CumulativeCpuUsageEmulatorPlugin.CPU_EMULATION_PROGRESS_INTERVAL;
-
- /**
-  * Gridmix cpu emulation lower limit.
-  */
- public static int GRIDMIX_CPU_EMULATION_LOWER_LIMIT = 55;
-
- /**
-  * Gridmix cpu emulation upper limit.
-  */
- public static int GRIDMIX_CPU_EMULATION_UPPER_LIMIT = 130;
-
- /**
-  * Gridmix heap memory custom interval
-  */
- public static final String GRIDMIX_HEAP_MEMORY_CUSTOM_INTRVL = 
-     TotalHeapUsageEmulatorPlugin.HEAP_EMULATION_PROGRESS_INTERVAL;
-  
- /**
-  *  Gridmix heap free memory ratio
-  */
- public static final String GRIDMIX_HEAP_FREE_MEMORY_RATIO =
-     TotalHeapUsageEmulatorPlugin.MIN_HEAP_FREE_RATIO;
-  
- /**
-  *  Gridmix memory emulation plugin
-  */
- public static final String GRIDMIX_MEMORY_EMULATION_PLUGIN = 
-     TotalHeapUsageEmulatorPlugin.class.getName();
-  
- /**
-  *  Gridmix memory emulation
-  */
- public static final String GRIDMIX_MEMORY_EMULATON = 
-     ResourceUsageMatcher.RESOURCE_USAGE_EMULATION_PLUGINS;
-  
- /**
-  *  Gridmix memory emulation lower limit.
-  */
- public static int GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT = 55;
-  
- /**
-  * Gridmix memory emulation upper limit. 
-  */
- public static int GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT = 130;
-
-}
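
Since every constant above is an ordinary Hadoop Configuration key, a small hedged sketch of toggling them programmatically rather than through -D arguments (the class name and values here are illustrative only):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixConfig;

public class GridMixConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Disable compression emulation and raise the minimum generated file size.
    conf.setBoolean(GridMixConfig.GRIDMIX_COMPRESSION_ENABLE, false);
    conf.setLong(GridMixConfig.GRIDMIX_MINIMUM_FILE_SIZE, 100 * 1024 * 1024);
    // Reads fall back to the supplied default when a key is unset.
    System.out.println(
        conf.getBoolean(GridMixConfig.GRIDMIX_COMPRESSION_ENABLE, true));
  }
}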

+ 0 - 34
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridMixRunMode.java

@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-/**
- * Gridmix run modes.
- */
-public enum GridMixRunMode {
-   DATA_GENERATION(1), RUN_GRIDMIX(2), DATA_GENERATION_AND_RUN_GRIDMIX(3);
-   private int mode;
-
-   GridMixRunMode (int mode) {
-      this.mode = mode;
-   }
-   
-   public int getValue() {
-     return mode;
-   }
-}
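
The enum above only maps a mode to its integer value, which is what the runGridmixAndVerify calls pass around. Had a reverse lookup been needed, a hypothetical companion helper (not part of the removed file) could have looked like this:

import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;

public class GridMixRunModeSketch {
  // Hypothetical reverse lookup: resolve a GridMixRunMode constant back
  // from the integer value carried through the system tests.
  static GridMixRunMode fromValue(int value) {
    for (GridMixRunMode mode : GridMixRunMode.values()) {
      if (mode.getValue() == value) {
        return mode;
      }
    }
    throw new IllegalArgumentException("Unknown Gridmix run mode: " + value);
  }
}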

+ 0 - 86
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobStory.java

@@ -1,86 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.tools.rumen.ZombieJobProducer;
-import org.apache.hadoop.tools.rumen.ZombieJob;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-/**
- * Build the job stories with a given trace file. 
- */
-public class GridmixJobStory {
-  private static Log LOG = LogFactory.getLog(GridmixJobStory.class);
-  private Path path;
-  private Map<JobID, ZombieJob> zombieJobs;
-  private Configuration conf;
-  
-  public GridmixJobStory(Path path, Configuration conf) {
-    this.path = path;
-    this.conf = conf;
-    try {
-      zombieJobs = buildJobStories();
-      if (zombieJobs == null) {
-        throw new NullPointerException("No jobs found in the "
-            + "given trace file.");
-      }
-    } catch (IOException ioe) {
-      LOG.warn("Error:" + ioe.getMessage());
-    } catch (NullPointerException npe) {
-      LOG.warn("Error:" + npe.getMessage());
-    }
-  }
-  
-  /**
-   * Get the zombie jobs as a map.
-   * @return the zombie jobs map.
-   */
-  public Map<JobID, ZombieJob> getZombieJobs() {
-    return zombieJobs;
-  }
-  
-  /**
-   * Get the zombie job of a given job id.
-   * @param jobId - gridmix job id.
-   * @return - the zombie job object.
-   */
-  public ZombieJob getZombieJob(JobID jobId) {
-    return zombieJobs.get(jobId);
-  }
-  
-  private Map<JobID, ZombieJob> buildJobStories() throws IOException {
-    ZombieJobProducer zjp = new ZombieJobProducer(path, null, conf);
-    Map<JobID, ZombieJob> hm = new HashMap<JobID, ZombieJob>();
-    ZombieJob zj = zjp.getNextJob();
-    while (zj != null) {
-      hm.put(zj.getJobID(), zj);
-      zj = zjp.getNextJob();
-    }
-    if (hm.size() == 0) {
-      return null;
-    } else {
-      return hm;
-    }
-  }
-}
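
A short usage sketch for the removed class, assuming a locally readable Rumen trace; getZombieJobs() and getSubmissionTime() are the same calls the verification code below relies on, while the trace path and class name are placeholders:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobStory;
import org.apache.hadoop.tools.rumen.ZombieJob;

public class GridmixJobStorySketch {
  public static void main(String[] args) {
    // Trace path is a placeholder; any Rumen trace readable through the
    // configured file system would do.
    GridmixJobStory story = new GridmixJobStory(
        new Path("file:///tmp/3m_trace.json"), new Configuration());
    for (ZombieJob zombie : story.getZombieJobs().values()) {
      System.out.println(zombie.getJobID() + " submitted at "
          + zombie.getSubmissionTime());
    }
  }
}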

+ 0 - 82
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobSubmission.java

@@ -1,82 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapreduce.test.system.JTClient;
-import org.junit.Assert;
-
-/**
- * Submit the gridmix jobs. 
- */
-public class GridmixJobSubmission {
-  private static final Log LOG = 
-      LogFactory.getLog(GridmixJobSubmission.class);
-  private int gridmixJobCount;
-  private Configuration conf;
-  private Path gridmixDir;
-  private JTClient jtClient;
-
-  public GridmixJobSubmission(Configuration conf, JTClient jtClient , 
-                              Path gridmixDir) { 
-    this.conf = conf;
-    this.jtClient = jtClient;
-    this.gridmixDir = gridmixDir;
-  }
-  
-  /**
-   * Submit the gridmix jobs.
-   * @param runtimeArgs - gridmix common runtime arguments.
-   * @param otherArgs - gridmix other runtime arguments.
-   * @param mode - gridmix run mode (see {@link GridMixRunMode}).
-   * @throws Exception if the job submission fails.
-   */
-  public void submitJobs(String [] runtimeArgs, 
-                         String [] otherArgs, int mode) throws Exception {
-    int prvJobCount = jtClient.getClient().getAllJobs().length;
-    int exitCode = -1;
-    if (otherArgs == null) {
-      exitCode = UtilsForGridmix.runGridmixJob(gridmixDir, conf, 
-                                               mode, runtimeArgs);
-    } else {
-      exitCode = UtilsForGridmix.runGridmixJob(gridmixDir, conf, mode,
-                                               runtimeArgs, otherArgs);
-    }
-    Assert.assertEquals("Gridmix jobs have failed.", 0 , exitCode);
-    gridmixJobCount = jtClient.getClient().getAllJobs().length - prvJobCount;
-  }
-
-  /**
-   * Get the submitted jobs count.
-   * @return count of no. of jobs submitted for a trace.
-   */
-  public int getGridmixJobCount() {
-     return gridmixJobCount;
-  }
-
-  /**
-   * Get the job configuration.
-   * @return Configuration of a submitted job.
-   */
-  public Configuration getJobConf() {
-    return conf;
-  }
-}
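
And a hedged sketch of how the suites above drive this helper, assuming a live cluster; the Gridmix directory is a placeholder, and the runtimeArgs/otherArgs arrays are the ones each removed test builds:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.gridmix.test.system.GridMixRunMode;
import org.apache.hadoop.mapred.gridmix.test.system.GridmixJobSubmission;
import org.apache.hadoop.mapreduce.test.system.JTClient;

public class GridmixSubmissionSketch {
  // Drives a single trace through the helper above; the caller supplies a
  // live JTClient plus the runtimeArgs/otherArgs arrays built by the tests.
  static int submitTrace(Configuration conf, JTClient jtClient,
                         String[] runtimeArgs, String[] otherArgs)
      throws Exception {
    GridmixJobSubmission submission = new GridmixJobSubmission(
        conf, jtClient, new Path("/user/hadoop/gridmix"));  // placeholder dir
    submission.submitJobs(runtimeArgs, otherArgs,
        GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue());
    return submission.getGridmixJobCount();
  }
}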

+ 0 - 1166
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/GridmixJobVerification.java

@@ -1,1166 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-
-import java.io.IOException;
-import java.io.File;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.HashMap;
-import java.util.SortedMap;
-import java.util.TreeMap;
-import java.util.Collections;
-import java.util.Set;
-import java.util.ArrayList;
-import java.util.Arrays;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.TaskCounter;
-import org.apache.hadoop.mapreduce.Counters;
-import org.apache.hadoop.mapreduce.Counter;
-import org.apache.hadoop.mapreduce.CounterGroup;
-import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
-import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
-import org.apache.hadoop.mapreduce.test.system.JTClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.tools.rumen.LoggedJob;
-import org.apache.hadoop.tools.rumen.ZombieJob;
-import org.apache.hadoop.tools.rumen.TaskInfo;
-import org.junit.Assert;
-import java.text.ParseException;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.mapred.gridmix.GridmixSystemTestCase;
-
-/**
- * Verify each Gridmix job against the corresponding job story in a trace file.
- */
-public class GridmixJobVerification {
-
-  private static Log LOG = LogFactory.getLog(GridmixJobVerification.class);
-  private Path path;
-  private Configuration conf;
-  private JTClient jtClient;
-  private String userResolverVal;
-  static final String origJobIdKey = GridMixConfig.GRIDMIX_ORIGINAL_JOB_ID;
-  static final String jobSubKey = GridMixConfig.GRIDMIX_SUBMISSION_POLICY;
-  static final String jobTypeKey = GridMixConfig.GRIDMIX_JOB_TYPE;
-  static final String mapTaskKey = GridMixConfig.GRIDMIX_SLEEPJOB_MAPTASK_ONLY;
-  static final String usrResolver = GridMixConfig.GRIDMIX_USER_RESOLVER;
-  static final String fileOutputFormatKey = FileOutputFormat.COMPRESS;
-  static final String fileInputFormatKey = FileInputFormat.INPUT_DIR;
-  static final String compEmulKey = GridMixConfig.GRIDMIX_COMPRESSION_ENABLE;
-  static final String inputDecompKey = 
-      GridMixConfig.GRIDMIX_INPUT_DECOMPRESS_ENABLE;
-  static final String mapInputCompRatio = 
-      GridMixConfig.GRIDMIX_INPUT_COMPRESS_RATIO;
-  static final String mapOutputCompRatio = 
-      GridMixConfig.GRIDMIX_INTERMEDIATE_COMPRESSION_RATIO;
-  static final String reduceOutputCompRatio = 
-      GridMixConfig.GRIDMIX_OUTPUT_COMPRESSION_RATIO;
-  private Map<String, List<JobConf>> simuAndOrigJobsInfo = 
-      new HashMap<String, List<JobConf>>();
-
-  /**
-   * Gridmix job verification constructor
-   * @param path - path of the gridmix output directory.
-   * @param conf - cluster configuration.
-   * @param jtClient - jobtracker client.
-   */
-  public GridmixJobVerification(Path path, Configuration conf, 
-     JTClient jtClient) {
-    this.path = path;
-    this.conf = conf;
-    this.jtClient = jtClient;
-  }
-  
-  /**
-   * Verify the Gridmix jobs against the corresponding job stories in a trace file.
-   * @param jobids - gridmix job ids.
-   * @throws IOException - if an I/O error occurs.
-   * @throws ParseException - if a parse error occurs.
-   */
-  public void verifyGridmixJobsWithJobStories(List<JobID> jobids) 
-      throws Exception {
-
-    SortedMap <Long, String> origSubmissionTime = new TreeMap <Long, String>();
-    SortedMap <Long, String> simuSubmissionTime = new TreeMap<Long, String>();
-    GridmixJobStory gjs = new GridmixJobStory(path, conf);
-    final Iterator<JobID> ite = jobids.iterator();
-    File destFolder = new File(System.getProperty("java.io.tmpdir") 
-                              + "/gridmix-st/");
-    destFolder.mkdir();
-    while (ite.hasNext()) {
-      JobID simuJobId = ite.next();
-      JobHistoryParser.JobInfo jhInfo = getSimulatedJobHistory(simuJobId);
-      Assert.assertNotNull("Job history not found.", jhInfo);
-      Counters counters = jhInfo.getTotalCounters();
-      JobConf simuJobConf = getSimulatedJobConf(simuJobId, destFolder);
-      String origJobId = simuJobConf.get(origJobIdKey);
-      LOG.info("OriginalJobID<->CurrentJobID:" 
-              + origJobId + "<->" + simuJobId);
-
-      if (userResolverVal == null) {
-        userResolverVal = simuJobConf.get(usrResolver);
-      }
-      ZombieJob zombieJob = gjs.getZombieJob(JobID.forName(origJobId));
-      Map<String, Long> mapJobCounters = getJobMapCounters(zombieJob);
-      Map<String, Long> reduceJobCounters = getJobReduceCounters(zombieJob);
-      if (simuJobConf.get(jobSubKey).contains("REPLAY")) {
-          origSubmissionTime.put(zombieJob.getSubmissionTime(), 
-                                 origJobId.toString() + "^" + simuJobId); 
-          simuSubmissionTime.put(jhInfo.getSubmitTime() , 
-                                 origJobId.toString() + "^" + simuJobId); ;
-      }
-
-      LOG.info("Verifying the job <" + simuJobId + "> and wait for a while...");
-      verifySimulatedJobSummary(zombieJob, jhInfo, simuJobConf);
-      verifyJobMapCounters(counters, mapJobCounters, simuJobConf);
-      verifyJobReduceCounters(counters, reduceJobCounters, simuJobConf); 
-      verifyCompressionEmulation(zombieJob.getJobConf(), simuJobConf, counters, 
-                                 reduceJobCounters, mapJobCounters);
-      verifyDistributeCache(zombieJob,simuJobConf);
-      setJobDistributedCacheInfo(simuJobId.toString(), simuJobConf, 
-         zombieJob.getJobConf());
-      verifyHighRamMemoryJobs(zombieJob, simuJobConf);
-      verifyCPUEmulationOfJobs(zombieJob, jhInfo, simuJobConf);
-      verifyMemoryEmulationOfJobs(zombieJob, jhInfo, simuJobConf);
-      LOG.info("Done.");
-    }
-    verifyDistributedCacheBetweenJobs(simuAndOrigJobsInfo);
-  }
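
For context, a minimal usage sketch of this verifier. The driver variables below (gridmixOutput, cluster, jobids) are illustrative assumptions, not part of the removed sources:

    Path gridmixOutput = new Path("/user/test/gridmix");  // assumed output dir
    Configuration conf = new Configuration();
    JTClient jtClient = cluster.getJTClient();  // hypothetical cluster handle
    GridmixJobVerification verifier =
        new GridmixJobVerification(gridmixOutput, conf, jtClient);
    verifier.verifyGridmixJobsWithJobStories(jobids);  // jobids: List<JobID>
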
-
-  /**
-   * Verify the job submission order between the jobs in replay mode.
-   * @param origSubmissionTime - sorted map of original jobs submission times.
-   * @param simuSubmissionTime - sorted map of simulated jobs submission times.
-   */
-  public void verifyJobSubmissionTime(SortedMap<Long, String> origSubmissionTime, 
-      SortedMap<Long, String> simuSubmissionTime) { 
-    Assert.assertEquals("Simulated jobs' submission time count does not " 
-                     + "match the original jobs' submission time count.", 
-                     origSubmissionTime.size(), simuSubmissionTime.size());
-    // Both maps sort by submission time; walking their values in parallel
-    // compares the "<origJobId>^<simuJobId>" pairs position by position.
-    Iterator<String> origIte = origSubmissionTime.values().iterator();
-    Iterator<String> simuIte = simuSubmissionTime.values().iterator();
-    while (origIte.hasNext()) {
-      String origAndSimuJobID = origIte.next();
-      String simuAndOrigJobID = simuIte.next();
-      Assert.assertEquals("Simulated jobs were not submitted in the same " 
-                         + "order as the original jobs in REPLAY mode.", 
-                         origAndSimuJobID, simuAndOrigJobID);
-    }
-  }
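
The check above relies on TreeMap iterating its values in ascending key order; a tiny illustration:

    SortedMap<Long, String> byTime = new TreeMap<Long, String>();
    byTime.put(200L, "job_b^sim_b");
    byTime.put(100L, "job_a^sim_a");
    // byTime.values() yields "job_a^sim_a" then "job_b^sim_b",
    // i.e. ordered by submission timestamp.
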
-
-  /**
-   * It verifies the simulated job map counters against the original job.
-   * @param counters - Simulated job counters.
-   * @param mapCounters - Original job map counters.
-   * @param jobConf - Simulated job configuration.
-   * @throws ParseException - If a parse error occurs.
-   */
-  public void verifyJobMapCounters(Counters counters, 
-     Map<String,Long> mapCounters, JobConf jobConf) throws ParseException {
-    if (!jobConf.get(jobTypeKey, "LOADJOB").equals("SLEEPJOB")) {
-      Assert.assertEquals("Map input records have not matched.",
-                          mapCounters.get("MAP_INPUT_RECS").longValue(), 
-                          getCounterValue(counters, "MAP_INPUT_RECORDS"));
-    } else {
-      Assert.assertTrue("Map Input Bytes are zero", 
-                        getCounterValue(counters,"HDFS_BYTES_READ") != 0);
-      Assert.assertNotNull("Map Input Records are zero", 
-                           getCounterValue(counters, "MAP_INPUT_RECORDS")!=0);
-    }
-  }
-
-  /**
-   * It verifies the simulated job reduce counters against the original job.
-   * @param counters - Simulated job counters.
-   * @param reduceCounters - Original job reduce counters.
-   * @param jobConf - Simulated job configuration.
-   * @throws ParseException - if a parse error occurs.
-   */
-  public void verifyJobReduceCounters(Counters counters, 
-     Map<String,Long> reduceCounters, JobConf jobConf) throws ParseException {
-    if (jobConf.get(jobTypeKey, "LOADJOB").equals("SLEEPJOB")) {
-      Assert.assertTrue("Reduce output records are not zero for sleep job.",
-          getCounterValue(counters, "REDUCE_OUTPUT_RECORDS") == 0);
-      Assert.assertTrue("Reduce output bytes are not zero for sleep job.", 
-          getCounterValue(counters,"HDFS_BYTES_WRITTEN") == 0);
-    }
-  }
-
-  /**
-   * It verifies the gridmix simulated job summary.
-   * @param zombieJob - Original job summary.
-   * @param jhInfo  - Simulated job history info.
-   * @param jobConf - simulated job configuration.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void verifySimulatedJobSummary(ZombieJob zombieJob, 
-     JobHistoryParser.JobInfo jhInfo, JobConf jobConf) throws IOException {
-    Assert.assertEquals("Job id has not matched", zombieJob.getJobID(), 
-                        JobID.forName(jobConf.get(origJobIdKey)));
-
-    Assert.assertEquals("Job maps have not matched", zombieJob.getNumberMaps(),
-                        jhInfo.getTotalMaps());
-
-    if (!jobConf.getBoolean(mapTaskKey, false)) { 
-      Assert.assertEquals("Job reducers have not matched", 
-          zombieJob.getNumberReduces(), jhInfo.getTotalReduces());
-    } else {
-      Assert.assertEquals("Job reducers have not matched",
-                          0, jhInfo.getTotalReduces());
-    }
-
-    Assert.assertEquals("Job status has not matched.", 
-                        zombieJob.getOutcome().name(), 
-                        convertJobStatus(jhInfo.getJobStatus()));
-
-    LoggedJob loggedJob = zombieJob.getLoggedJob();
-    Assert.assertEquals("Job priority has not matched.", 
-                        loggedJob.getPriority().toString(), 
-                        jhInfo.getPriority());
-
-    if (jobConf.get(usrResolver).contains("RoundRobin")) {
-       String user = UserGroupInformation.getLoginUser().getShortUserName();
-       Assert.assertTrue(jhInfo.getJobId().toString() 
-                        + " has not impersonate with other user.", 
-                        !jhInfo.getUsername().equals(user));
-    }
-  }
-
-  /**
-   * Get the original job map counters from a trace.
-   * @param zombieJob - Original job story.
-   * @return - map counters as a map.
-   */
-  public Map<String, Long> getJobMapCounters(ZombieJob zombieJob) {
-    long expMapInputBytes = 0;
-    long expMapOutputBytes = 0;
-    long expMapInputRecs = 0;
-    long expMapOutputRecs = 0;
-    Map<String,Long> mapCounters = new HashMap<String,Long>();
-    for (int index = 0; index < zombieJob.getNumberMaps(); index ++) {
-      TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
-      expMapInputBytes += mapTask.getInputBytes();
-      expMapOutputBytes += mapTask.getOutputBytes();
-      expMapInputRecs += mapTask.getInputRecords();
-      expMapOutputRecs += mapTask.getOutputRecords();
-    }
-    mapCounters.put("MAP_INPUT_BYTES", expMapInputBytes);
-    mapCounters.put("MAP_OUTPUT_BYTES", expMapOutputBytes);
-    mapCounters.put("MAP_INPUT_RECS", expMapInputRecs);
-    mapCounters.put("MAP_OUTPUT_RECS", expMapOutputRecs);
-    return mapCounters;
-  }
-  
-  /**
-   * Get the original job reduce counters from a trace.
-   * @param zombieJob - Original job story.
-   * @return - reduce counters as a map.
-   */
-  public Map<String,Long> getJobReduceCounters(ZombieJob zombieJob) {
-    long expReduceInputBytes = 0;
-    long expReduceOutputBytes = 0;
-    long expReduceInputRecs = 0;
-    long expReduceOutputRecs = 0;
-    Map<String,Long> reduceCounters = new HashMap<String,Long>();
-    for (int index = 0; index < zombieJob.getNumberReduces(); index ++) {
-      TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
-      expReduceInputBytes += reduceTask.getInputBytes();
-      expReduceOutputBytes += reduceTask.getOutputBytes();
-      expReduceInputRecs += reduceTask.getInputRecords();
-      expReduceOutputRecs += reduceTask.getOutputRecords();
-    }
-    reduceCounters.put("REDUCE_INPUT_BYTES", expReduceInputBytes);
-    reduceCounters.put("REDUCE_OUTPUT_BYTES", expReduceOutputBytes);
-    reduceCounters.put("REDUCE_INPUT_RECS", expReduceInputRecs);
-    reduceCounters.put("REDUCE_OUTPUT_RECS", expReduceOutputRecs);
-    return reduceCounters;
-  }
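
Both counter aggregations walk the per-task TaskInfo records of a Rumen ZombieJob. A hedged sketch of where such jobs come from (names as in Hadoop's Rumen tool; treat the exact constructor signature and trace path as assumptions):

    // A null cluster topology is assumed acceptable for reading job stories.
    ZombieJobProducer producer = new ZombieJobProducer(
        new Path("file:///tmp/trace.json.gz"), null, conf);
    ZombieJob job;
    while ((job = producer.getNextJob()) != null) {
      // aggregate per-task input/output bytes and records, exactly as
      // getJobMapCounters()/getJobReduceCounters() do above
    }
    producer.close();
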
-
-  /**
-   * Get the simulated job configuration of a job.
-   * @param simulatedJobID - Simulated job id.
-   * @param tmpJHFolder - temporary job history folder location.
-   * @return - simulated job configuration.
-   * @throws IOException - If an I/O error occurs.
-   */
-  public JobConf getSimulatedJobConf(JobID simulatedJobID, File tmpJHFolder) 
-      throws IOException{
-    FileSystem fs = null;
-    try {
-
-      String historyFilePath = 
-         jtClient.getProxy().getJobHistoryLocationForRetiredJob(simulatedJobID);
-      Path jhpath = new Path(historyFilePath);
-      fs = jhpath.getFileSystem(conf);
-      fs.copyToLocalFile(jhpath,new Path(tmpJHFolder.toString()));
-      String historyPath =
-          historyFilePath.substring(0,historyFilePath.lastIndexOf("_"));
-      fs.copyToLocalFile(new Path(historyPath + "_conf.xml"), 
-                         new Path(tmpJHFolder.toString()));
-      JobConf jobConf = new JobConf();
-      jobConf.addResource(new Path(tmpJHFolder.toString() 
-                         + "/" + simulatedJobID + "_conf.xml"));
-      jobConf.reloadConfiguration();
-      return jobConf;
-
-    } finally {
-      if (fs != null) {
-        fs.close();
-      }
-    }
-  }
-
-  /**
-   * Get the simulated job history of a job.
-   * @param simulatedJobID - simulated job id.
-   * @return - simulated job information.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public JobHistoryParser.JobInfo getSimulatedJobHistory(JobID simulatedJobID) 
-      throws IOException {
-    FileSystem fs = null;
-    try {
-      String historyFilePath = jtClient.getProxy().
-          getJobHistoryLocationForRetiredJob(simulatedJobID);
-      Path jhpath = new Path(historyFilePath);
-      fs = jhpath.getFileSystem(conf);
-      JobHistoryParser jhparser = new JobHistoryParser(fs, jhpath);
-      JobHistoryParser.JobInfo jhInfo = jhparser.parse();
-      return jhInfo;
-
-    } finally {
-      if (fs != null) {
-        fs.close();
-      }
-    }
-  }
-
-  /**
-   * It verifies the heap memory resource usage of gridmix jobs with
-   * corresponding original job in the trace.
-   * @param zombieJob - Original job history.
-   * @param jhInfo - Simulated job history.
-   * @param simuJobConf - simulated job configuration.
-   */
-  public void verifyMemoryEmulationOfJobs(ZombieJob zombieJob,
-                 JobHistoryParser.JobInfo jhInfo,
-                                 JobConf simuJobConf) throws Exception {
-    long origJobMapsTHU = 0;
-    long origJobReducesTHU = 0;
-    long simuJobMapsTHU = 0;
-    long simuJobReducesTHU = 0;
-    boolean isMemEmulOn = false;
-    if (simuJobConf.get(GridMixConfig.GRIDMIX_MEMORY_EMULATON) != null) {
-      isMemEmulOn = 
-          simuJobConf.get(GridMixConfig.GRIDMIX_MEMORY_EMULATON).
-              contains(GridMixConfig.GRIDMIX_MEMORY_EMULATION_PLUGIN);
-    }
-
-    if (isMemEmulOn) {
-      for (int index = 0; index < zombieJob.getNumberMaps(); index ++) {
-        TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
-        if (mapTask.getResourceUsageMetrics().getHeapUsage() > 0) {
-          origJobMapsTHU += 
-                  mapTask.getResourceUsageMetrics().getHeapUsage();
-        }
-      }
-      LOG.info("Original Job Maps Total Heap Usage: " + origJobMapsTHU);
-
-      for (int index = 0; index < zombieJob.getNumberReduces(); index ++) {
-        TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
-        if (reduceTask.getResourceUsageMetrics().getHeapUsage() > 0) {
-          origJobReducesTHU += 
-                  reduceTask.getResourceUsageMetrics().getHeapUsage();
-        }
-      }
-      LOG.info("Original Job Reduces Total Heap Usage: " + origJobReducesTHU);
-
-      simuJobMapsTHU = 
-          getCounterValue(jhInfo.getMapCounters(), 
-                          TaskCounter.COMMITTED_HEAP_BYTES.toString());
-      LOG.info("Simulated Job Maps Total Heap Usage: " + simuJobMapsTHU);
-
-      simuJobReducesTHU = 
-          getCounterValue(jhInfo.getReduceCounters(), 
-                          TaskCounter.COMMITTED_HEAP_BYTES.toString());
-      LOG.info("Simulated Jobs Reduces Total Heap Usage: " + simuJobReducesTHU);
-
-      long mapCount = jhInfo.getTotalMaps();
-      long reduceCount = jhInfo.getTotalReduces();
-
-      String strHeapRatio =
-          simuJobConf.get(GridMixConfig.GRIDMIX_HEAP_FREE_MEMORY_RATIO);
-      if (strHeapRatio == null) {
-        strHeapRatio = "0.3F";
-      }
-
-      if (mapCount > 0) {
-        double mapEmulFactor = (simuJobMapsTHU * 100.0) / origJobMapsTHU;
-        long mapEmulAccuracy = Math.round(mapEmulFactor);
-        LOG.info("Maps memory emulation accuracy of a job:" 
-                + mapEmulAccuracy + "%");
-        Assert.assertTrue("Map phase total memory emulation exceeded the "
-                         + "configured upper limit.", mapEmulAccuracy 
-                         <= GridMixConfig.GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT);
-        Assert.assertTrue("Map phase total memory emulation fell below " 
-                         + "the configured lower limit.", mapEmulAccuracy 
-                         >= GridMixConfig.GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT);
-        double expHeapRatio = Double.parseDouble(strHeapRatio);
-        LOG.info("expHeapRatio for maps:" + expHeapRatio);
-        double actHeapRatio = 
-            ((double) Math.abs(origJobMapsTHU - simuJobMapsTHU)) / origJobMapsTHU;
-        LOG.info("actHeapRatio for maps:" + actHeapRatio);
-        Assert.assertTrue("Simulated job maps heap ratio not matched.",
-                          actHeapRatio <= expHeapRatio); 
-      }
-
-      if (reduceCount > 0) {
-        double reduceEmulFactor = (simuJobReducesTHU * 100.0) / origJobReducesTHU;
-        long reduceEmulAccuracy = Math.round(reduceEmulFactor);
-        LOG.info("Reduces memory emulation accuracy of a job:" 
-                + reduceEmulAccuracy + "%");
-        Assert.assertTrue("Reduce phase total memory emulation exceeded "
-                         + "the configured upper limit.", reduceEmulAccuracy 
-                         <= GridMixConfig.GRIDMIX_MEMORY_EMULATION_UPPER_LIMIT); 
-        Assert.assertTrue("Reduce phase total memory emulation fell below " 
-                         + "the configured lower limit.", reduceEmulAccuracy 
-                         >= GridMixConfig.GRIDMIX_MEMORY_EMULATION_LOWER_LIMIT);
-        double expHeapRatio = Double.parseDouble(strHeapRatio);
-        LOG.info("expHeapRatio for reduces:" + expHeapRatio);
-        double actHeapRatio = 
-            ((double) Math.abs(origJobReducesTHU - simuJobReducesTHU)) 
-            / origJobReducesTHU;
-        LOG.info("actHeapRatio for reduces:" + actHeapRatio);
-        Assert.assertTrue("Simulated job reduces heap ratio not matched.",
-                          actHeapRatio <= expHeapRatio); 
-      }
-    }
-  }
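
The accuracy figure above is simply round(100 × simulated / original); with the illustrative totals below the map-side accuracy comes out at 95%:

    long origTHU = 4000000000L;  // original maps' total heap usage (bytes)
    long simuTHU = 3800000000L;  // simulated maps' total heap usage (bytes)
    long accuracy = Math.round((simuTHU * 100.0) / origTHU);  // -> 95 (%)
    // The assertions then require LOWER_LIMIT <= accuracy <= UPPER_LIMIT.
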
-
-  /**
-   * It verifies the cpu resource usage of a gridmix job against
-   * its original job.
-   * @param origJobHistory - Original job history.
-   * @param simuJobHistoryInfo - Simulated job history.
-   * @param simuJobConf - simulated job configuration.
-   */
-  public void verifyCPUEmulationOfJobs(ZombieJob origJobHistory,
-       JobHistoryParser.JobInfo simuJobHistoryInfo,
-       JobConf simuJobConf) throws Exception {
-
-    boolean isCpuEmulOn = false;
-    if (simuJobConf.get(GridMixConfig.GRIDMIX_CPU_EMULATON) != null) {
-      isCpuEmulOn = 
-          simuJobConf.get(GridMixConfig.GRIDMIX_CPU_EMULATON).
-              contains(GridMixConfig.GRIDMIX_CPU_USAGE_PLUGIN);
-    }
-
-    if (isCpuEmulOn) {
-      Map<String,Long> origJobMetrics =
-                       getOriginalJobCPUMetrics(origJobHistory);
-      Map<String,Long> simuJobMetrics =
-                       getSimulatedJobCPUMetrics(simuJobHistoryInfo);
-
-      long origMapUsage = origJobMetrics.get("MAP");
-      LOG.info("Maps cpu usage of original job:" + origMapUsage);
-
-      long origReduceUsage = origJobMetrics.get("REDUCE");
-      LOG.info("Reduces cpu usage of original job:" + origReduceUsage);
-
-      long simuMapUsage = simuJobMetrics.get("MAP");
-      LOG.info("Maps cpu usage of simulated job:" + simuMapUsage);
-
-      long simuReduceUsage = simuJobMetrics.get("REDUCE");
-      LOG.info("Reduces cpu usage of simulated job:"+ simuReduceUsage);
-
-      long mapCount = simuJobHistoryInfo.getTotalMaps(); 
-      long reduceCount = simuJobHistoryInfo.getTotalReduces(); 
-
-      if (mapCount > 0) {
-        double mapEmulFactor = (simuMapUsage * 100.0) / origMapUsage;
-        long mapEmulAccuracy = Math.round(mapEmulFactor);
-        LOG.info("CPU emulation accuracy for maps in job " + 
-                 simuJobHistoryInfo.getJobId() + 
-                 ": " + mapEmulAccuracy + "%");
-        Assert.assertTrue("Map-side cpu emulation inaccurate!" +
-                          " Actual cpu usage: " + simuMapUsage +
-                          " Expected cpu usage: " + origMapUsage, mapEmulAccuracy
-                          >= GridMixConfig.GRIDMIX_CPU_EMULATION_LOWER_LIMIT
-                          && mapEmulAccuracy
-                          <= GridMixConfig.GRIDMIX_CPU_EMULATION_UPPER_LIMIT);
-      }
-
-      if (reduceCount > 0) {
-        double reduceEmulFactor = (simuReduceUsage * 100.0) / origReduceUsage;
-        long reduceEmulAccuracy = Math.round(reduceEmulFactor);
-        LOG.info("CPU emulation accuracy for reduces in job " + 
-                 simuJobHistoryInfo.getJobId() + 
-                 ": " + reduceEmulAccuracy + "%");
-        Assert.assertTrue("Reduce-side cpu emulation inaccurate!" +
-                          " Actual cpu usage: " + simuReduceUsage +
-                          " Expected cpu usage: " + origReduceUsage,  
-                          reduceEmulAccuracy
-                          >= GridMixConfig.GRIDMIX_CPU_EMULATION_LOWER_LIMIT
-                          && reduceEmulAccuracy
-                          <= GridMixConfig.GRIDMIX_CPU_EMULATION_UPPER_LIMIT);
-      }
-    }
-  }
-
-  /**
-   *  Get the simulated job cpu metrics.
-   * @param jhInfo - Simulated job history
-   * @return - cpu metrics as a map.
-   * @throws Exception - if an error occurs.
-   */
-  private Map<String,Long> getSimulatedJobCPUMetrics(
-          JobHistoryParser.JobInfo jhInfo) throws Exception {
-    Map<String, Long> resourceMetrics = new HashMap<String, Long>();
-    long mapCPUUsage = 
-        getCounterValue(jhInfo.getMapCounters(), 
-                        TaskCounter.CPU_MILLISECONDS.toString());
-    resourceMetrics.put("MAP", mapCPUUsage);
-    long reduceCPUUsage = 
-        getCounterValue(jhInfo.getReduceCounters(), 
-                        TaskCounter.CPU_MILLISECONDS.toString());
-    resourceMetrics.put("REDUCE", reduceCPUUsage);
-    return resourceMetrics;
-  }
-
-  /**
-   * Get the original job cpu metrics.
-   * @param zombieJob - original job history.
-   * @return - cpu metrics as map.
-   */
-  private Map<String, Long> getOriginalJobCPUMetrics(ZombieJob zombieJob) {
-    long mapTotalCPUUsage = 0;
-    long reduceTotalCPUUsage = 0;
-    Map<String,Long> resourceMetrics = new HashMap<String,Long>();
-
-    for (int index = 0; index < zombieJob.getNumberMaps(); index ++) {
-      TaskInfo mapTask = zombieJob.getTaskInfo(TaskType.MAP, index);
-      if (mapTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) {
-        mapTotalCPUUsage += 
-            mapTask.getResourceUsageMetrics().getCumulativeCpuUsage();
-      }
-    }
-    resourceMetrics.put("MAP", mapTotalCPUUsage); 
-    
-    for (int index = 0; index < zombieJob.getNumberReduces(); index ++) {
-      TaskInfo reduceTask = zombieJob.getTaskInfo(TaskType.REDUCE, index);
-      if (reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage() > 0) {
-        reduceTotalCPUUsage += 
-            reduceTask.getResourceUsageMetrics().getCumulativeCpuUsage();
-      }
-    }
-    resourceMetrics.put("REDUCE", reduceTotalCPUUsage);
-    return resourceMetrics;
-  }
-  
-  /**
-   * Get the user resolver of a job.
-   */
-  public String getJobUserResolver() {
-    return userResolverVal;
-  }
-
-  /**
-   * It verifies the compression ratios of mapreduce jobs.
-   * @param origJobConf - original job configuration.
-   * @param simuJobConf - simulated job configuration.
-   * @param counters  - simulated job counters.
-   * @param origReduceCounters - original job reduce counters.
-   * @param origMapCounters - original job map counters.
-   * @throws ParseException - if a parser error occurs.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public void verifyCompressionEmulation(JobConf origJobConf, 
-                                         JobConf simuJobConf,Counters counters, 
-                                         Map<String, Long> origReduceCounters, 
-                                         Map<String, Long> origMapJobCounters) 
-                                         throws ParseException,IOException { 
-    if (simuJobConf.getBoolean(compEmulKey, false)) {
-      String inputDir = origJobConf.get(fileInputFormatKey);
-      Assert.assertNotNull(fileInputFormatKey + " is Null",inputDir);
-      long simMapInputBytes = getCounterValue(counters, "HDFS_BYTES_READ");
-      long uncompressedInputSize = origMapJobCounters.get("MAP_INPUT_BYTES"); 
-      long simReduceInputBytes =
-          getCounterValue(counters, "REDUCE_SHUFFLE_BYTES");
-      long simMapOutputBytes = getCounterValue(counters, "MAP_OUTPUT_BYTES");
-
-      // Verify whether input compression is enabled or not.
-      if (inputDir.contains(".gz") || inputDir.contains(".tgz") 
-         || inputDir.contains(".bz")) { 
-        Assert.assertTrue("Input decompression attribute has not been set " 
-                         + "for compressed input",
-                         simuJobConf.getBoolean(inputDecompKey, false));
-
-        float INPUT_COMP_RATIO = 
-            getExpectedCompressionRatio(simuJobConf, mapInputCompRatio);
-        float INTERMEDIATE_COMP_RATIO = 
-            getExpectedCompressionRatio(simuJobConf, mapOutputCompRatio);
-
-        // Verify Map Input Compression Ratio.
-        assertMapInputCompressionRatio(simMapInputBytes, uncompressedInputSize, 
-                                       INPUT_COMP_RATIO);
-
-        // Verify Map Output Compression Ratio.
-        assertMapOutputCompressionRatio(simReduceInputBytes, simMapOutputBytes, 
-                                        INTERMEDIATE_COMP_RATIO);
-      } else {
-        Assert.assertEquals("MAP input bytes has not matched.", 
-                            convertBytes(uncompressedInputSize), 
-                            convertBytes(simMapInputBytes));
-      }
-
-      Assert.assertEquals("Simulated job output format has not matched with " 
-                         + "original job output format.",
-                         origJobConf.getBoolean(fileOutputFormatKey,false), 
-                         simuJobConf.getBoolean(fileOutputFormatKey,false));
-
-      if (simuJobConf.getBoolean(fileOutputFormatKey,false)) { 
-        float OUTPUT_COMP_RATIO = 
-            getExpectedCompressionRatio(simuJobConf, reduceOutputCompRatio);
-
-         //Verify reduce output compression ratio.
-         long simReduceOutputBytes = 
-             getCounterValue(counters, "HDFS_BYTES_WRITTEN");
-         long origReduceOutputBytes = 
-             origReduceCounters.get("REDUCE_OUTPUT_BYTES");
-         assertReduceOutputCompressionRatio(simReduceOutputBytes, 
-                                            origReduceOutputBytes, 
-                                            OUTPUT_COMP_RATIO);
-      }
-    }
-  }
-
-  private void assertMapInputCompressionRatio(long simMapInputBytes, 
-                                   long origMapInputBytes, 
-                                   float expInputCompRatio) { 
-    LOG.info("***Verify the map input bytes compression ratio****");
-    LOG.info("Simulated job's map input bytes(REDUCE_SHUFFLE_BYTES): " 
-            + simMapInputBytes);
-    LOG.info("Original job's map input bytes: " + origMapInputBytes);
-
-    final float actInputCompRatio = 
-        getActualCompressionRatio(simMapInputBytes, origMapInputBytes);
-    LOG.info("Expected Map Input Compression Ratio:" + expInputCompRatio);
-    LOG.info("Actual Map Input Compression Ratio:" + actInputCompRatio);
-
-    float diffVal = (float)(expInputCompRatio * 0.06);
-    LOG.info("Expected Difference of Map Input Compression Ratio is <= " + 
-            + diffVal);
-    float delta = Math.abs(expInputCompRatio - actInputCompRatio);
-    LOG.info("Actual Difference of Map Iput Compression Ratio:" + delta);
-    Assert.assertTrue("Simulated job input compression ratio has mismatched.", 
-                      delta <= diffVal);
-    LOG.info("******Done******");
-  }
-
-  private void assertMapOutputCompressionRatio(long simReduceInputBytes, 
-                                               long simMapOutputBytes, 
-                                               float expMapOutputCompRatio) { 
-    LOG.info("***Verify the map output bytes compression ratio***");
-    LOG.info("Simulated job reduce input bytes:" + simReduceInputBytes);
-    LOG.info("Simulated job map output bytes:" + simMapOutputBytes);
-
-    final float actMapOutputCompRatio = 
-        getActualCompressionRatio(simReduceInputBytes, simMapOutputBytes);
-    LOG.info("Expected Map Output Compression Ratio:" + expMapOutputCompRatio);
-    LOG.info("Actual Map Output Compression Ratio:" + actMapOutputCompRatio);
-
-    float diffVal = 0.05f;
-    LOG.info("Expected Difference Of Map Output Compression Ratio is <= " 
-            + diffVal);
-    float delta = Math.abs(expMapOutputCompRatio - actMapOutputCompRatio);
-    LOG.info("Actual Difference Of Map Output Compression Ratio:" + delta);
-
-    Assert.assertTrue("Simulated job map output compression ratio " 
-                     + "has not matched.", delta <= diffVal);
-    LOG.info("******Done******");
-  }
-
-  private void assertReduceOutputCompressionRatio(long simReduceOutputBytes, 
-      long origReduceOutputBytes, float expOutputCompRatio) {
-    LOG.info("***Verify the reduce output bytes compression ratio***");
-    final float actOutputCompRatio = 
-        getActualCompressionRatio(simReduceOutputBytes, origReduceOutputBytes);
-    LOG.info("Simulated job's reduce output bytes:" + simReduceOutputBytes);
-    LOG.info("Original job's reduce output bytes:" + origReduceOutputBytes);
-    LOG.info("Expected output compression ratio:" + expOutputCompRatio);
-    LOG.info("Actual output compression ratio:" + actOutputCompRatio);
-    long diffVal = (long)(origReduceOutputBytes * 0.15);
-    long delta = Math.abs(origReduceOutputBytes - simReduceOutputBytes);
-    LOG.info("Expected difference of output compressed bytes is <= " 
-            + diffVal);
-    LOG.info("Actual difference of compressed output bytes:" + delta);
-    Assert.assertTrue("Simulated job reduce output compression ratio " 
-                     + "has not matched.", delta <= diffVal);
-    LOG.info("******Done******");
-  }
-
-  private float getExpectedCompressionRatio(JobConf simuJobConf, 
-                                            String ratioKey) {
-    // The default compression ratio is 0.50f irrespective of the original 
-    // job's compression ratio.
-    if (simuJobConf.get(ratioKey) != null) {
-      return Float.parseFloat(simuJobConf.get(ratioKey));
-    } else {
-      return 0.50f;
-    }
-  }
-
-  private float getActualCompressionRatio(long compressBytes, 
-                                          long uncompressBytes) {
-    double ratio = ((double) compressBytes) / uncompressBytes; 
-    int significant = (int) Math.round(ratio * 100);
-    return ((float) significant) / 100; 
-  }
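
getActualCompressionRatio() keeps two decimal places by rounding through an integer; e.g. for 26 MB of compressed data against 100 MB uncompressed:

    double ratio = ((double) 27262976L) / 104857600L;  // 0.26
    int significant = (int) Math.round(ratio * 100);   // 26
    float actual = ((float) significant) / 100;        // 0.26f
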
-
-  /**
-   * Verify the distributed cache files between the jobs in a gridmix run.
-   * @param jobsInfo - jobConfs of simulated and original jobs as a map.
-   */
-  public void verifyDistributedCacheBetweenJobs(
-      Map<String,List<JobConf>> jobsInfo) {
-     if (jobsInfo.size() > 1) {
-       Map<String, Integer> simJobfilesOccurBtnJobs = 
-           getDistcacheFilesOccurenceBetweenJobs(jobsInfo, 0);
-       Map<String, Integer> origJobfilesOccurBtnJobs = 
-           getDistcacheFilesOccurenceBetweenJobs(jobsInfo, 1);
-       List<Integer> simuOccurList = 
-           getMapValuesAsList(simJobfilesOccurBtnJobs);
-       Collections.sort(simuOccurList);
-       List<Integer> origOccurList = 
-           getMapValuesAsList(origJobfilesOccurBtnJobs);
-       Collections.sort(origOccurList);
-       Assert.assertEquals("The unique count of distibuted cache files in " 
-                        + "simulated jobs have not matched with the unique "
-                        + "count of original jobs distributed files ", 
-                        simuOccurList.size(), origOccurList.size());
-       int index = 0;
-       for (Integer origDistFileCount : origOccurList) {
-         Assert.assertEquals("Distributed cache file reused in simulated " 
-                            + "jobs has not matched with reused of distributed"
-                            + "cache file in original jobs.",
-                            origDistFileCount, simuOccurList.get(index));
-         index ++;
-       }
-     }
-  }
-
-  /**
-   * Get the unique distributed cache files and occurrence between the jobs.
-   * @param jobsInfo - job's configurations as a map.
-   * @param jobConfIndex - 0 for simulated job configuration and 
-   *                       1 for original jobs configuration.
-   * @return  - unique distributed cache files and occurrences as map.
-   */
-  private Map<String, Integer> getDistcacheFilesOccurenceBetweenJobs(
-      Map<String, List<JobConf>> jobsInfo, int jobConfIndex) {
-    Map<String,Integer> filesOccurBtnJobs = new HashMap <String,Integer>();
-    Set<String> jobIds = jobsInfo.keySet();
-    Iterator<String > ite = jobIds.iterator();
-    while (ite.hasNext()) {
-      String jobId = ite.next();
-      List<JobConf> jobconfs = jobsInfo.get(jobId);
-      String [] distCacheFiles = jobconfs.get(jobConfIndex).get(
-          GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(",");
-      String [] distCacheFileTimeStamps = jobconfs.get(jobConfIndex).get(
-          GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP).split(",");
-      int indx = 0;
-      for (String distCacheFile : distCacheFiles) {
-        // Key the occurrence map by "<file>^<timestamp>^<user>".
-        String fileAndSize = distCacheFile + "^" 
-                           + distCacheFileTimeStamps[indx] + "^" 
-                           + jobconfs.get(jobConfIndex).getUser();
-        if (filesOccurBtnJobs.get(fileAndSize) != null) {
-          int count = filesOccurBtnJobs.get(fileAndSize);
-          count++;
-          filesOccurBtnJobs.put(fileAndSize, count);
-        } else {
-          filesOccurBtnJobs.put(fileAndSize, 1);
-        }
-        indx++;
-      }
-    }
-    return filesOccurBtnJobs;
-  }
-
-  /**
-   * It verifies the distributed cache emulation of a job.
-   * @param zombieJob - Original job story.
-   * @param simuJobConf - Simulated job configuration.
-   */
-  public void verifyDistributeCache(ZombieJob zombieJob, 
-                                    JobConf simuJobConf) throws IOException {
-    if (simuJobConf.getBoolean(GridMixConfig.GRIDMIX_DISTCACHE_ENABLE, false)) {
-      JobConf origJobConf = zombieJob.getJobConf();
-      assertFileVisibility(simuJobConf);
-      assertDistcacheFiles(simuJobConf,origJobConf);
-      assertFileSizes(simuJobConf,origJobConf);
-      assertFileStamps(simuJobConf,origJobConf);
-    } else {
-      Assert.assertNull("Configuration has distributed cache visibilites" 
-          + "without enabled distributed cache emulation.", 
-          simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_VISIBILITIES));
-      Assert.assertNull("Configuration has distributed cache files time " 
-          + "stamps without enabled distributed cache emulation.", 
-          simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP));
-      Assert.assertNull("Configuration has distributed cache files paths" 
-          + "without enabled distributed cache emulation.", 
-          simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILES));
-      Assert.assertNull("Configuration has distributed cache files sizes" 
-          + "without enabled distributed cache emulation.", 
-          simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILESSIZE));
-    }
-  }
-
-  private void assertFileStamps(JobConf simuJobConf, JobConf origJobConf) {
-    //Verify simulated jobs against distributed cache files time stamps.
-    String [] origDCFTS = 
-        origJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP).split(",");
-    String [] simuDCFTS = 
-        simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_TIMESTAMP).split(",");
-    for (int index = 0; index < origDCFTS.length; index++) { 
-      Assert.assertTrue("Invalid time stamps between original "
-          +"and simulated job", Long.parseLong(origDCFTS[index]) 
-          < Long.parseLong(simuDCFTS[index]));
-    }
-  }
-
-  private void assertFileVisibility(JobConf simuJobConf) {
-    // Verify simulated jobs against distributed cache file visibilities.
-    String [] distFiles = 
-        simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(",");
-    String [] simuDistVisibilities = 
-        simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_VISIBILITIES).split(",");
-    List<Boolean> expFileVisibility = new ArrayList<Boolean>();
-    int index = 0;
-    for (String distFile : distFiles) {
-      boolean isLocalDistCache = GridmixSystemTestCase.isLocalDistCache(
-                                 distFile, 
-                                 simuJobConf.getUser(), 
-                                 Boolean.valueOf(simuDistVisibilities[index]));
-      // Files staged in the local (private) distributed cache are expected
-      // to be non-public; all others should be publicly visible.
-      expFileVisibility.add(!isLocalDistCache);
-      index++;
-    }
-    index = 0;
-    for (String actFileVisibility : simuDistVisibilities) {
-      Assert.assertEquals("Simulated job distributed cache file " 
-                         + "visibilities have not matched.", 
-                         expFileVisibility.get(index),
-                         Boolean.valueOf(actFileVisibility));
-      index++;
-    }
-  }
-  
-  private void assertDistcacheFiles(JobConf simuJobConf, JobConf origJobConf) 
-      throws IOException {
-    //Verify simulated jobs against distributed cache files.
-    String [] origDistFiles = origJobConf.get(
-        GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(",");
-    String [] simuDistFiles = simuJobConf.get(
-        GridMixConfig.GRIDMIX_DISTCACHE_FILES).split(",");
-    String [] simuDistVisibilities = simuJobConf.get(
-        GridMixConfig.GRIDMIX_DISTCACHE_VISIBILITIES).split(",");
-    Assert.assertEquals("No. of simulatued job's distcache files mismacted" 
-                       + "with no.of original job's distcache files", 
-                       origDistFiles.length, simuDistFiles.length);
-
-    int index = 0;
-    for (String simDistFile : simuDistFiles) {
-      Path distPath = new Path(simDistFile);
-      boolean isLocalDistCache = 
-          GridmixSystemTestCase.isLocalDistCache(simDistFile,
-              simuJobConf.getUser(),
-              Boolean.valueOf(simuDistVisibilities[index]));
-      if (!isLocalDistCache) {
-        FileSystem fs = distPath.getFileSystem(conf);
-        FileStatus fstat = fs.getFileStatus(distPath);
-        FsPermission permission = fstat.getPermission();
-        Assert.assertTrue("HDFS distributed cache file has wrong " 
-                         + "permissions for users.", 
-                         FsAction.READ_WRITE.SYMBOL 
-                         == permission.getUserAction().SYMBOL);
-        Assert.assertTrue("HDFS distributed cache file has wrong " 
-                         + "permissions for groups.", 
-                         FsAction.READ.SYMBOL 
-                         == permission.getGroupAction().SYMBOL);
-        Assert.assertTrue("HDSFS distributed cache file has wrong " 
-                         + "permissions for others.", 
-                         FsAction.READ.SYMBOL 
-                         == permission.getOtherAction().SYMBOL);
-      }
-      index++;
-    }
-  }
-
-  private void assertFileSizes(JobConf simuJobConf, JobConf origJobConf) { 
-    // Verify simulated jobs against distributed cache files size.
-    List<String> origDistFilesSize = 
-        Arrays.asList(origJobConf.get(
-            GridMixConfig.GRIDMIX_DISTCACHE_FILESSIZE).split(","));
-    Collections.sort(origDistFilesSize);
-
-    List<String> simuDistFilesSize = 
-        Arrays.asList(simuJobConf.get(
-            GridMixConfig.GRIDMIX_DISTCACHE_FILESSIZE).split(","));
-    Collections.sort(simuDistFilesSize);
-
-    Assert.assertEquals("Simulated job's file size list has not " 
-                       + "matched with the Original job's file size list.",
-                       origDistFilesSize.size(),
-                       simuDistFilesSize.size());
-
-    for (int index = 0; index < origDistFilesSize.size(); index ++) {
-       Assert.assertEquals("Simulated job distcache file size has not " 
-                          + "matched with original job distcache file size.", 
-                          origDistFilesSize.get(index), 
-                          simuDistFilesSize.get(index));
-    }
-  }
-
-  private void setJobDistributedCacheInfo(String jobId, JobConf simuJobConf, 
-     JobConf origJobConf) { 
-    if (simuJobConf.get(GridMixConfig.GRIDMIX_DISTCACHE_FILES) != null) {
-      List<JobConf> jobConfs = new ArrayList<JobConf>();
-      jobConfs.add(simuJobConf);
-      jobConfs.add(origJobConf);
-      simuAndOrigJobsInfo.put(jobId,jobConfs);
-    }
-  }
-
-  private List<Integer> getMapValuesAsList(Map<String,Integer> jobOccurs) { 
-    List<Integer> occursList = new ArrayList<Integer>();
-    Set<String> files = jobOccurs.keySet();
-    Iterator<String > ite = files.iterator();
-    while (ite.hasNext()) {
-      String file = ite.next(); 
-      occursList.add(jobOccurs.get(file));
-    }
-    return occursList;
-  }
-
-  /**
-   * It verifies the high ram gridmix jobs.
-   * @param zombieJob - Original job story.
-   * @param simuJobConf - Simulated job configuration.
-   */
-  @SuppressWarnings("deprecation")
-  public void verifyHighRamMemoryJobs(ZombieJob zombieJob,
-                                      JobConf simuJobConf) {
-    JobConf origJobConf = zombieJob.getJobConf();
-    int origMapFactor = getMapFactor(origJobConf);
-    int origReduceFactor = getReduceFactor(origJobConf);
-    boolean isHighRamEnable = 
-        simuJobConf.getBoolean(GridMixConfig.GRIDMIX_HIGH_RAM_JOB_ENABLE, 
-                               false);
-    if (isHighRamEnable) {
-      if (origMapFactor >= 2 && origReduceFactor >= 2) {
-        assertGridMixHighRamJob(simuJobConf, origJobConf, 1);
-      } else if (origMapFactor >= 2) {
-        assertGridMixHighRamJob(simuJobConf, origJobConf, 2);
-      } else if (origReduceFactor >= 2) {
-        assertGridMixHighRamJob(simuJobConf, origJobConf, 3);
-      }
-    } else {
-      if (origMapFactor >= 2 && origReduceFactor >= 2) {
-        assertGridMixHighRamJob(simuJobConf, origJobConf, 4);
-      } else if (origMapFactor >= 2) {
-        assertGridMixHighRamJob(simuJobConf, origJobConf, 5);
-      } else if (origReduceFactor >= 2) {
-        assertGridMixHighRamJob(simuJobConf, origJobConf, 6);
-      }
-    }
-  }
-
-  /**
-   * Get the value for identifying the slots used by the map.
-   * @param jobConf - job configuration
-   * @return - map factor value.
-   */
-  public static int getMapFactor(Configuration jobConf) {
-    long clusterMapMem = 
-        Long.parseLong(jobConf.get(GridMixConfig.CLUSTER_MAP_MEMORY));
-    long jobMapMem = 
-        Long.parseLong(jobConf.get(GridMixConfig.JOB_MAP_MEMORY_MB));
-    return (int)Math.ceil((double)jobMapMem / clusterMapMem);  
-  }
-
-  /**
-   * Get the value for identifying the slots used by the reduce.
-   * @param jobConf - job configuration.
-   * @return - reduce factor value.
-   */
-  public static int getReduceFactor(Configuration jobConf) {
-    long clusterReduceMem = 
-        Long.parseLong(jobConf.get(GridMixConfig.CLUSTER_REDUCE_MEMORY));
-    long jobReduceMem = 
-        Long.parseLong(jobConf.get(GridMixConfig.JOB_REDUCE_MEMORY_MB));
-    return (int)Math.ceil((double)jobReduceMem / clusterReduceMem);
-  }
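
Both factors are slot counts obtained by rounding up the job's per-task memory against the cluster's slot size; with illustrative values:

    long clusterMapMem = 1536L;  // cluster map slot size in MB (illustrative)
    long jobMapMem = 4096L;      // job's requested map memory in MB
    int mapFactor = (int) Math.ceil((double) jobMapMem / clusterMapMem);  // 3
    // A factor >= 2 marks the job as "high ram" on the map side.
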
-
-  @SuppressWarnings("deprecation")
-  private void assertGridMixHighRamJob(JobConf simuJobConf, 
-                                       Configuration origConf, int option) {
-    int simuMapFactor = getMapFactor(simuJobConf);
-    int simuReduceFactor = getReduceFactor(simuJobConf);
-    /**
-     *  option 1 : Both map and reduce honor the high ram.
-     *  option 2 : Only map honors the high ram.
-     *  option 3 : Only reduce honors the high ram.
-     *  option 4 : Neither map nor reduce should honor the high ram
-     *             in the disabled state.
-     *  option 5 : Map should not honor the high ram in the disabled state.
-     *  option 6 : Reduce should not honor the high ram in the disabled state.
-     */
-    switch (option) {
-      case 1 :
-               Assert.assertTrue("Gridmix job has not honored the high "
-                                + "ram for map.", simuMapFactor >= 2 
-                                && simuMapFactor == getMapFactor(origConf));
-               Assert.assertTrue("Gridmix job has not honored the high "
-                                + "ram for reduce.", simuReduceFactor >= 2 
-                                && simuReduceFactor 
-                                == getReduceFactor(origConf));
-               break;
-      case 2 :
-               Assert.assertTrue("Gridmix job has not honored the high "
-                                + "ram for map.", simuMapFactor >= 2 
-                                && simuMapFactor == getMapFactor(origConf));
-               break;
-      case 3 :
-               Assert.assertTrue("Girdmix job has not honored the high "
-                                + "ram for reduce.", simuReduceFactor >= 2 
-                                && simuReduceFactor 
-                                == getReduceFactor(origConf));
-               break;
-      case 4 :
-               Assert.assertTrue("Gridmix job has honored the high "
-                                + "ram for map in emulation disable state.", 
-                                simuMapFactor < 2 
-                                && simuMapFactor != getMapFactor(origConf));
-               Assert.assertTrue("Gridmix job has honored the high "
-                                + "ram for reduce in emulation disable state.", 
-                                simuReduceFactor < 2 
-                                && simuReduceFactor 
-                                != getReduceFactor(origConf));
-               break;
-      case 5 :
-               Assert.assertTrue("Gridmix job has honored the high "
-                                + "ram for map in emulation disable state.", 
-                                simuMapFactor < 2 
-                                && simuMapFactor != getMapFactor(origConf));
-               break;
-      case 6 :
-               Assert.assertTrue("Girdmix job has honored the high "
-                                + "ram for reduce in emulation disable state.", 
-                                simuReduceFactor < 2 
-                                && simuReduceFactor 
-                                != getReduceFactor(origConf));
-               break;
-    }
-  }
-
-  /**
-   * Get task memory after scaling based on cluster configuration.
-   * @param jobTaskKey - Job task key attribute.
-   * @param clusterTaskKey - Cluster task key attribute.
-   * @param origConf - Original job configuration.
-   * @param simuConf - Simulated job configuration.
-   * @return scaled task memory value.
-   */
-  @SuppressWarnings("deprecation")
-  public static long getScaledTaskMemInMB(String jobTaskKey, 
-                                          String clusterTaskKey, 
-                                          Configuration origConf, 
-                                          Configuration simuConf) { 
-    long simuClusterTaskValue = 
-        simuConf.getLong(clusterTaskKey, JobConf.DISABLED_MEMORY_LIMIT);
-    long origClusterTaskValue = 
-        origConf.getLong(clusterTaskKey, JobConf.DISABLED_MEMORY_LIMIT);
-    long origJobTaskValue = 
-        origConf.getLong(jobTaskKey, JobConf.DISABLED_MEMORY_LIMIT);
-    double scaleFactor = 
-        Math.ceil((double)origJobTaskValue / origClusterTaskValue);
-    long simulatedJobValue = (long)(scaleFactor * simuClusterTaskValue);
-    return simulatedJobValue;
-  }
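
In other words, the original job's slot multiple is preserved and re-applied to the simulated cluster's slot size; a worked example with illustrative values:

    long origClusterTask = 1024L;  // original cluster slot size (MB)
    long origJobTask = 3072L;      // original job's task memory (MB)
    long simuClusterTask = 2048L;  // simulated cluster slot size (MB)
    double scale = Math.ceil((double) origJobTask / origClusterTask);  // 3.0
    long simulated = (long) (scale * simuClusterTask);                 // 6144
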
-
-  /**
-   * It verifies the memory limit of a task.
-   * @param taskMemInMB - task memory limit.
-   * @param taskLimitInMB - task upper limit.
-   */
-  public static void verifyMemoryLimits(long taskMemInMB, long taskLimitInMB) {
-    if (taskMemInMB > taskLimitInMB) {
-      Assert.fail("Simulated job's task memory exceeds the " 
-                 + "upper limit of task virtual memory.");
-    }
-  }
-
-  private String convertJobStatus(String jobStatus) {
-    if (jobStatus.equals("SUCCEEDED")) { 
-      return "SUCCESS";
-    } else {
-      return jobStatus;
-    }
-  }
-  
-  private String convertBytes(long bytesValue) {
-    int units = 1024;
-    if (bytesValue < units) {
-      return String.valueOf(bytesValue) + "B";
-    } else {
-      // Convert the bytes into KB, MB, GB, TB etc.
-      int exp = (int) (Math.log(bytesValue) / Math.log(units));
-      return String.format("%1d%sB", (long) (bytesValue / Math.pow(units, exp)), 
-          "KMGTPE".charAt(exp - 1));
-    }
-  }
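
Sample outputs of convertBytes() (it truncates, rather than rounds, within each unit):

    convertBytes(512L);      // -> "512B"
    convertBytes(2048L);     // -> "2KB"
    convertBytes(3145728L);  // -> "3MB"
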
- 
-
-  private long getCounterValue(Counters counters, String key) 
-     throws ParseException { 
-    for (String groupName : counters.getGroupNames()) {
-       CounterGroup totalGroup = counters.getGroup(groupName);
-       Iterator<Counter> itrCounter = totalGroup.iterator();
-       while (itrCounter.hasNext()) {
-         Counter counter = itrCounter.next();
-         if (counter.getName().equals(key)) {
-           return counter.getValue();
-         }
-       }
-    }
-    return 0;
-  }
-}
-

+ 0 - 513
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/org/apache/hadoop/mapred/gridmix/test/system/UtilsForGridmix.java

@@ -1,513 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.mapred.gridmix.test.system;
-
-import java.io.IOException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.mapred.gridmix.Gridmix;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapreduce.JobID;
-import java.util.Date;
-import java.util.HashMap;
-import java.util.ArrayList;
-import java.util.Collection;
-import java.util.Arrays;
-import java.net.URI;
-import java.text.SimpleDateFormat;
-import java.io.OutputStream;
-import java.util.Set;
-import java.util.List;
-import java.util.Iterator;
-import java.util.Map;
-import java.io.File;
-import java.io.FileOutputStream;
-import org.apache.hadoop.test.system.ProxyUserDefinitions;
-import org.apache.hadoop.test.system.ProxyUserDefinitions.GroupsAndHost;
-
-/**
- * Gridmix utilities.
- */
-public class UtilsForGridmix {
-  private static final Log LOG = LogFactory.getLog(UtilsForGridmix.class);
-  private static final Path DEFAULT_TRACES_PATH =
-    new Path(System.getProperty("user.dir") + "/src/test/system/resources/");
-
-  /**
-   * Cleanup the folder or file.
-   * @param path - folder or file path.
-   * @param conf - cluster configuration.
-   * @throws IOException - If an I/O error occurs.
-   */
-  public static void cleanup(Path path, Configuration conf) 
-     throws IOException {
-    FileSystem fs = path.getFileSystem(conf);
-    fs.delete(path, true);
-    fs.close();
-  }
-
-  /**
-   * Get the login user.
-   * @return - login user as a string.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public static String getUserName() throws IOException {
-    return UserGroupInformation.getLoginUser().getUserName();
-  }
-  
-  /**
-   * Get the argument list for gridmix job.
-   * @param gridmixDir - gridmix parent directory.
-   * @param gridmixRunMode - gridmix run mode: 1, 2 or 3.
-   * @param values - gridmix runtime values.
-   * @param otherArgs - gridmix other generic args.
-   * @return - argument list as string array.
-   */
-  public static String [] getArgsList(Path gridmixDir, int gridmixRunMode, 
-                                      String [] values, String [] otherArgs) {
-    String [] runtimeArgs = { 
-        "-D", GridMixConfig.GRIDMIX_LOG_MODE + "=DEBUG", 
-        "-D", GridMixConfig.GRIDMIX_OUTPUT_DIR + "=gridmix", 
-        "-D", GridMixConfig.GRIDMIX_JOB_SUBMISSION_QUEUE_IN_TRACE + "=true", 
-        "-D", GridMixConfig.GRIDMIX_JOB_TYPE + "=" + values[0], 
-        "-D", GridMixConfig.GRIDMIX_USER_RESOLVER + "=" + values[1], 
-        "-D", GridMixConfig.GRIDMIX_SUBMISSION_POLICY + "=" + values[2]
-    };
-
-    String [] classArgs;
-    if ((gridmixRunMode == GridMixRunMode.DATA_GENERATION.getValue() 
-       || gridmixRunMode 
-       == GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()) 
-       && values[1].indexOf("RoundRobinUserResolver") > 0) { 
-      classArgs = new String[] { 
-          "-generate", values[3], 
-          "-users", values[4], 
-          gridmixDir.toString(), 
-          values[5]
-      };
-    } else if (gridmixRunMode == GridMixRunMode.DATA_GENERATION.getValue() 
-              || gridmixRunMode 
-              == GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.getValue()) { 
-      classArgs = new String[] { 
-          "-generate", values[3], 
-          gridmixDir.toString(), 
-          values[4]
-      };
-    } else if (gridmixRunMode == GridMixRunMode.RUN_GRIDMIX.getValue() 
-              && values[1].indexOf("RoundRobinUserResolver") > 0) { 
-      classArgs = new String[] { 
-          "-users", values[3], 
-          gridmixDir.toString(), 
-          values[4]
-      };
-    } else { 
-      classArgs = new String[] { 
-         gridmixDir.toString(),values[3]
-      };
-    }
-
-    String [] args = new String [runtimeArgs.length + 
-       classArgs.length + ((otherArgs != null)?otherArgs.length:0)];
-    System.arraycopy(runtimeArgs, 0, args, 0, runtimeArgs.length);
-
-    if (otherArgs != null) {
-      System.arraycopy(otherArgs, 0, args, runtimeArgs.length, 
-                       otherArgs.length);
-      System.arraycopy(classArgs, 0, args, (runtimeArgs.length + 
-                       otherArgs.length), classArgs.length);
-    } else {
-      System.arraycopy(classArgs, 0, args, runtimeArgs.length, 
-                       classArgs.length);
-    }
-    return args;
-  }
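
The resulting argument vector is the runtime -D definitions, then any generic args, then the positional class args. A hedged sketch (gridmixDir and the values array are illustrative):

    String[] values = {"LOADJOB", "SubmitterUserResolver", "STRESS",
                       "file:///tmp/trace.json.gz"};
    String[] args = UtilsForGridmix.getArgsList(gridmixDir,
        GridMixRunMode.RUN_GRIDMIX.getValue(), values, null);
    // args = { "-D", ..., "-D", ...,               // runtime definitions
    //          gridmixDir.toString(),              // classArgs[0]
    //          "file:///tmp/trace.json.gz" }       // classArgs[1] = values[3]
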
-  
-  /**
-   * Create a file with specified size in mb.
-   * @param sizeInMB - file size in mb.
-   * @param inputDir - input directory.
-   * @param conf - cluster configuration.
-   * @throws Exception - if an exception occurs.
-   */
-  public static void createFile(int sizeInMB, Path inputDir, 
-      Configuration conf) throws Exception {
-    Date d = new Date();
-    SimpleDateFormat sdf = new SimpleDateFormat("ddMMyy_HHmmssS");
-    String formatDate = sdf.format(d);
-    FileSystem fs = inputDir.getFileSystem(conf);
-    OutputStream out = fs.create(new Path(inputDir,"datafile_" + formatDate));
-    final byte[] b = new byte[1024 * 1024];
-    for (int index = 0; index < sizeInMB; index++) { 
-      out.write(b);
-    }    
-    out.close();
-    fs.close();
-  }
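
The file is written as sizeInMB one-megabyte blocks of zero bytes, named with a timestamp so repeated calls do not collide. A usage sketch (path illustrative):

    // Create a 200 MB data file under the gridmix input directory.
    UtilsForGridmix.createFile(200, new Path("/user/test/gridmix/input"), conf);
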
-  
-  /**
-   * Create directories for a path.
-   * @param path - directories path.
-   * @param conf  - cluster configuration.
-   * @throws IOException  - if an I/O error occurs.
-   */
-  public static void createDirs(Path path,Configuration conf) 
-     throws IOException { 
-    FileSystem fs = path.getFileSystem(conf);
-    if (!fs.exists(path)) { 
-       fs.mkdirs(path);
-    }
-  }
-  
-  /**
-   * Run the Gridmix job with given runtime arguments.
-   * @param gridmixDir - Gridmix parent directory.
-   * @param conf - cluster configuration.
-   * @param gridmixRunMode - gridmix run mode: 1, 2 or 3.
-   * @param runtimeValues -gridmix runtime values.
-   * @return - gridmix status either 0 or 1.
-   * @throws Exception
-   */
-  public static int runGridmixJob(Path gridmixDir, Configuration conf, 
-     int gridmixRunMode, String [] runtimeValues) throws Exception {
-    return runGridmixJob(gridmixDir, conf, gridmixRunMode, runtimeValues, null);
-  }
-  /**
-   * Run the Gridmix job with given runtime arguments.
-   * @param gridmixDir - Gridmix parent directory
-   * @param conf - cluster configuration.
-   * @param gridmixRunMode - gridmix run mode.
-   * @param runtimeValues - gridmix runtime values.
-   * @param otherArgs - gridmix other generic args.
-   * @return - gridmix status either 0 or 1.
-   * @throws Exception
-   */
-  
-  public static int runGridmixJob(Path gridmixDir, Configuration conf, 
-                                  int gridmixRunMode, String [] runtimeValues, 
-                                  String [] otherArgs) throws Exception {
-    Path  outputDir = new Path(gridmixDir, "gridmix");
-    Path inputDir = new Path(gridmixDir, "input");
-    LOG.info("Cleanup the data if data already exists.");
-    String modeName = "";
-    switch (gridmixRunMode) { 
-      case 1 : 
-        cleanup(inputDir, conf);
-        cleanup(outputDir, conf);
-        modeName = GridMixRunMode.DATA_GENERATION.name();
-        break;
-      case 2 : 
-        cleanup(outputDir, conf);
-        modeName = GridMixRunMode.RUN_GRIDMIX.name();
-        break;
-      case 3 : 
-        cleanup(inputDir, conf);
-        cleanup(outputDir, conf);
-        modeName = GridMixRunMode.DATA_GENERATION_AND_RUN_GRIDMIX.name();
-        break;
-    }
-
-    final String [] args = 
-        UtilsForGridmix.getArgsList(gridmixDir, gridmixRunMode, 
-                                    runtimeValues, otherArgs);
-    Gridmix gridmix = new Gridmix();
-    LOG.info("Submit a Gridmix job in " + runtimeValues[1] 
-            + " mode for " + modeName);
-    int exitCode = ToolRunner.run(conf, gridmix, args);
-    return exitCode;
-  }
-
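A sketch of driving the runner in mode 3 (generate data, then run Gridmix); the runtimeValues entries are placeholders, since their exact ordering and meaning are defined by the Gridmix CLI rather than shown here:

    // Hypothetical invocation: mode 3 generates input data and then runs Gridmix.
    String[] runtimeValues = {"LOADJOB", "STRESS", "1024",
        "file:///tmp/wordcount_10m.json.gz"}; // placeholder values
    int exitCode = UtilsForGridmix.runGridmixJob(gridmixDir, conf, 3, runtimeValues);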
-  /**
-   * Get the proxy users file.
-   * @param conf - cluster configuration.
-   * @return String - path to the proxy users file.
-   * @throws Exception - if no proxy users are found in the configuration.
-   */
-  public static String getProxyUsersFile(Configuration conf) 
-      throws Exception {
-     ProxyUserDefinitions pud = getProxyUsersData(conf);
-     String fileName = buildProxyUsersFile(pud.getProxyUsers());
-     if (fileName == null) { 
-        LOG.error("Proxy users file not found.");
-        throw new Exception("Proxy users file not found.");
-     } else { 
-        return fileName;
-     }
-  }
-  
-  /**
-  * List the current Gridmix job IDs.
-  * @param client - job client.
-  * @param execJobCount - number of recently executed jobs to inspect.
-  * @return - list of Gridmix job IDs, or null if none are found.
-  * @throws IOException - if an I/O error occurs.
- public static List<JobID> listGridmixJobIDs(JobClient client, 
-     int execJobCount) throws IOException { 
-   List<JobID> jobids = new ArrayList<JobID>();
-   JobStatus [] jobStatus = client.getAllJobs();
-   int numJobs = jobStatus.length;
-   for (int index = 1; index <= execJobCount && index <= numJobs; index++) {
-     JobStatus js = jobStatus[numJobs - index];
-     JobID jobid = js.getJobID();
-     String jobName = js.getJobName();
-     if (!jobName.equals("GRIDMIX_GENERATE_INPUT_DATA") && 
-         !jobName.equals("GRIDMIX_GENERATE_DISTCACHE_DATA")) {
-       jobids.add(jobid);
-     }
-   }
-   return jobids.isEmpty() ? null : jobids;
- }
-
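For example, a test could fetch the IDs of the last three submitted jobs, relying on the filter above to drop the data-generation jobs (the JobClient construction is an assumption for the sketch):

    // Hypothetical usage: collect the IDs of the last 3 Gridmix jobs.
    JobClient client = new JobClient(new JobConf(conf)); // construction assumed
    List<JobID> jobIDs = UtilsForGridmix.listGridmixJobIDs(client, 3);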
- /**
-  * List the proxy users, excluding the given login user.
-  * @param conf - cluster configuration.
-  * @param loginUser - login user name to exclude from the result.
-  * @return - list of proxy user names.
-  * @throws Exception - if no proxy users are found in the configuration.
-  */
- public static List<String> listProxyUsers(Configuration conf,
-     String loginUser) throws Exception {
-   List<String> proxyUsers = new ArrayList<String>();
-   ProxyUserDefinitions pud = getProxyUsersData(conf);
-   Map<String, GroupsAndHost> usersData = pud.getProxyUsers();
-   Collection<String> users = usersData.keySet();
-   Iterator<String> itr = users.iterator();
-   while (itr.hasNext()) { 
-     String user = itr.next();
-     if (!user.equals(loginUser)) {
-       proxyUsers.add(user);
-     }
-   }
-   return proxyUsers;
- }
-
-  private static String buildProxyUsersFile(final Map<String, GroupsAndHost> 
-      proxyUserData) throws Exception { 
-     FileOutputStream fos = null;
-     File file = null;
-     StringBuilder input = new StringBuilder();
-     Set<String> users = proxyUserData.keySet();
-     Iterator<String> itr = users.iterator();
-     while (itr.hasNext()) { 
-       String user = itr.next();
-       if (!user.equals(
-           UserGroupInformation.getLoginUser().getShortUserName())) {
-         input.append(user);
-         final GroupsAndHost gah = proxyUserData.get(user);
-         final List <String> groups = gah.getGroups();
-         for (String group : groups) { 
-           input.append(",");
-           input.append(group);
-         }
-         input.append("\n");
-       }
-     }
-     if (input.length() > 0) { 
-        try {
-           file = File.createTempFile("proxyusers", null);
-           fos = new FileOutputStream(file);
-           fos.write(input.toString().getBytes());
-        } catch (IOException ioexp) { 
-           LOG.warn(ioexp.getMessage());
-           return null;
-        } finally {
-           if (fos != null) {
-              fos.close();
-           }
-           if (file != null) {
-              file.deleteOnExit();
-           }
-        }
-        LOG.info("Proxy users file: " + file);
-        return file.toString();
-     } else {
-        return null;
-     }
-  }
-
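The temporary file produced above holds one comma-separated record per proxy user, the user name first and its groups after it; with hypothetical users alice and bob, the contents would look like:

    alice,staff,dev
    bob,qa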
-  private static ProxyUserDefinitions getProxyUsersData(Configuration conf)
-      throws Exception { 
-    Iterator<Map.Entry<String, String>> itr = conf.iterator();
-    List<String> proxyUsersData = new ArrayList<String>();
-    while (itr.hasNext()) { 
-      String property = itr.next().toString();
-      if (property.indexOf("hadoop.proxyuser") >= 0 
-         && property.indexOf("groups=") >= 0) { 
-        proxyUsersData.add(property.split("\\.")[2]);
-      }
-    }
-
-    if (proxyUsersData.isEmpty()) { 
-       LOG.error("No proxy users found in the configuration.");
-       throw new Exception("No proxy users found in the configuration.");
-    }
-
-    ProxyUserDefinitions pud = new ProxyUserDefinitions() { 
-       public boolean writeToFile(URI filePath) throws IOException { 
-           throw new UnsupportedOperationException("No such method exists.");
-       }
-    };
-
-     for (String userName : proxyUsersData) { 
-        List<String> groups = Arrays.asList(conf.get("hadoop.proxyuser." + 
-            userName + ".groups").split(","));
-        List<String> hosts = Arrays.asList(conf.get("hadoop.proxyuser." + 
-            userName + ".hosts").split(","));
-        ProxyUserDefinitions.GroupsAndHost definitions = 
-            pud.new GroupsAndHost();
-        definitions.setGroups(groups);
-        definitions.setHosts(hosts);
-        pud.addProxyUser(userName, definitions);
-     }
-     return pud;
-  }
-
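The parser keys off property names of the form hadoop.proxyuser.<user>.groups and hadoop.proxyuser.<user>.hosts, with comma-separated values. A minimal sketch of the configuration it expects (user, groups, and hosts are assumptions):

    // Hypothetical configuration entries recognized by getProxyUsersData().
    Configuration conf = new Configuration();
    conf.set("hadoop.proxyuser.alice.groups", "staff,dev");
    conf.set("hadoop.proxyuser.alice.hosts", "host1.example.com,host2.example.com");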
-  /**
-   *  Gives the list of MR trace paths keyed by time interval.
-   *  It fetches only the paths that follow the file naming convention:
-   *    Syntax : &lt;FileName&gt;_&lt;TimeInterval&gt;.json.gz
-   *  The time interval in the file name must follow the convention:
-   *    Syntax: &lt;numeric&gt;[m|h|d] 
-   *    e.g. a 10-minute trace should specify 10m, 
-   *    a 1-hour trace should specify 1h, 
-   *    and a 1-day trace should specify 1d.
-   *
-   * @param conf - cluster configuration.
-   * @return - map of MR trace paths keyed by time interval.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public static Map<String, String> getMRTraces(Configuration conf) 
-     throws IOException { 
-    return getMRTraces(conf, DEFAULT_TRACES_PATH);
-  }
-  
-  /**
-   *  Gives the list of MR trace paths keyed by time interval.
-   *  It fetches only the paths that follow the file naming convention:
-   *    Syntax : &lt;FileName&gt;_&lt;TimeInterval&gt;.json.gz
-   *  The time interval in the file name must follow the convention:
-   *    Syntax: &lt;numeric&gt;[m|h|d] 
-   *    e.g. a 10-minute trace should specify 10m, 
-   *    a 1-hour trace should specify 1h, 
-   *    and a 1-day trace should specify 1d.
-   *
-   * @param conf - cluster configuration object.
-   * @param tracesPath - MR traces path.
-   * @return - map of MR trace paths keyed by time interval.
-   * @throws IOException - If an I/O error occurs.
-   */
-  public static Map<String, String> getMRTraces(Configuration conf, 
-      Path tracesPath) throws IOException { 
-     Map<String, String> jobTraces = new HashMap<String, String>();
-     final FileSystem fs = FileSystem.getLocal(conf);
-     final FileStatus[] fstat = fs.listStatus(tracesPath);
-     for (FileStatus fst : fstat) { 
-        final String fileName = fst.getPath().getName();
-        if (fileName.endsWith("m.json.gz") 
-            || fileName.endsWith("h.json.gz") 
-            || fileName.endsWith("d.json.gz")) { 
-           jobTraces.put(fileName.substring(fileName.indexOf("_") + 1, 
-              fileName.indexOf(".json.gz")), fst.getPath().toString());
-        }
-     }
-     if (jobTraces.isEmpty()) { 
-        LOG.error("No traces found in " + tracesPath.toString() + " path.");
-        throw new IOException("No traces found in " 
-                             + tracesPath.toString() + " path.");
-     }
-     return jobTraces;
-  }
-  
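As a worked example of the naming convention: a trace stored as wordcount_10m.json.gz (a hypothetical file name) is keyed by the interval 10m, with the full path as the value:

    // Hypothetical lookup of a 10-minute trace by its interval key.
    Map<String, String> traces =
        UtilsForGridmix.getMRTraces(conf, new Path("/tmp/traces")); // assumed path
    String tenMinuteTracePath = traces.get("10m");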
-  /**
-   * Lists all the MR trace paths irrespective of time interval.
-   * @param conf - cluster configuration.
-   * @param tracesPath - MR traces path.
-   * @return - MR paths as a list.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public static List<String> listMRTraces(Configuration conf, 
-      Path tracesPath) throws IOException {
-     List<String> jobTraces = new ArrayList<String>();
-     final FileSystem fs = FileSystem.getLocal(conf);
-     final FileStatus[] fstat = fs.listStatus(tracesPath);
-     for (FileStatus fst : fstat) {
-        jobTraces.add(fst.getPath().toString());
-     }
-     if (jobTraces.isEmpty()) {
-        LOG.error("No traces found in " + tracesPath.toString() + " path.");
-        throw new IOException("No traces found in " 
-                             + tracesPath.toString() + " path.");
-     }
-     return jobTraces;
-  }
-  
-  /**
-   * Lists all the MR trace paths under the default traces path,
-   * irrespective of time interval.
-   * @param conf - cluster configuration.
-   * @return - MR paths as a list.
-   * @throws IOException - if an I/O error occurs.
-   */
-  public static List<String> listMRTraces(Configuration conf) 
-      throws IOException { 
-     return listMRTraces(conf, DEFAULT_TRACES_PATH);
-  }
-
-  /**
-   * Gives the list of MR traces for given time interval.
-   * The time interval should follow the convention below.
-   *   Syntax : &lt;numeric&gt;[m|h|d]
-   *   e.g : 10m or 1h or 2d etc.
-   * @param conf - cluster configuration
-   * @param timeInterval - trace time interval.
-   * @param tracesPath - MR traces path.
-   * @return - MR paths as a list for a given time interval.
-   * @throws IOException - If an I/O error occurs.
-   */
-  public static List<String> listMRTracesByTime(Configuration conf, 
-      String timeInterval, Path tracesPath) throws IOException { 
-     List<String> jobTraces = new ArrayList<String>();
-     final FileSystem fs = FileSystem.getLocal(conf);
-     final FileStatus[] fstat = fs.listStatus(tracesPath);
-     for (FileStatus fst : fstat) { 
-        final String fileName = fst.getPath().getName();
-        if (fileName.indexOf(timeInterval) >= 0) { 
-           jobTraces.add(fst.getPath().toString());
-        }
-     }
-     return jobTraces;
-  }
-  
-  /**
-   * Gives the list of MR traces for given time interval.
-   * The time interval should follow the convention below.
-   *   Syntax : &lt;numeric&gt;[m|h|d]
-   *   e.g : 10m or 1h or 2d etc.
-   * @param conf - cluster configuration
-   * @param timeInterval - trace time interval.
-   * @return - MR paths as a list for a given time interval.
-   * @throws IOException - If an I/O error occurs.
-   */
-  public static List<String> listMRTracesByTime(Configuration conf, 
-      String timeInterval) throws IOException { 
-     return listMRTracesByTime(conf, timeInterval, DEFAULT_TRACES_PATH);
-  }
-}

BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/2m_stream_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/3m_stream_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/5m_stream_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case1_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case2_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case3_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/compression_case4_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case1.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/cpu_emul_case2.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case1_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case2_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case3_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case4_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case5_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case6_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case7_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case8_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/distcache_case9_trace.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case1.json.gz


BIN
hadoop-mapreduce-project/src/contrib/gridmix/src/test/system/resources/highram_mr_jobs_case2.json.gz


Because a large number of files were changed in this diff, some files are not shown.